author    Petr Tesarik <ptesarik@suse.cz>    2019-06-07 09:29:52 +0200
committer Petr Tesarik <ptesarik@suse.cz>    2019-06-07 09:29:52 +0200
commit    96baba769965571aa583c8bb5087f1b40417f58b (patch)
tree      d150d3177396fe714c170edbb255a4fa339d534d
parent    c40a30a6f91db538ac7bcc6518cee624025c47b2 (diff)
parent    2a6cc8a4c953628d2c107ffe99a6fb724e7b487d (diff)
Merge branch 'SLE15-SP1' into SLE12-SP5
- Delete patches.kabi/*
Conflicts:
  kabi/severities
  patches.kabi/arch-arm64-acpi-KABI-ignore-includes.patch
  series.conf
suse-commit: 187af105002d4396a677826f774d8f73cd303ebe
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu3
-rw-r--r--Documentation/admin-guide/hw-vuln/mds.rst307
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/devicetree/bindings/net/ti,wilink-st.txt6
-rw-r--r--Documentation/devicetree/bindings/rtc/sun6i-rtc.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/cx2072x.txt36
-rw-r--r--Documentation/networking/ip-sysctl.txt1
-rw-r--r--Documentation/perf/thunderx2-pmu.txt41
-rw-r--r--arch/arm/mach-iop13xx/setup.c8
-rw-r--r--arch/arm/mach-iop13xx/tpmi.c10
-rw-r--r--arch/arm/plat-iop/adma.c6
-rw-r--r--arch/arm/plat-orion/common.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/mm/gup.c9
-rw-r--r--arch/x86/entry/entry_64.S18
-rw-r--r--arch/x86/include/asm/text-patching.h28
-rw-r--r--arch/x86/kernel/cpu/hygon.c5
-rw-r--r--arch/x86/kernel/ftrace.c32
-rw-r--r--arch/x86/mm/gup.c14
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c420
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h2
-rw-r--r--drivers/crypto/chelsio/chcr_core.c191
-rw-r--r--drivers/crypto/chelsio/chcr_core.h44
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h10
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c204
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c6
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c12
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c1
-rw-r--r--drivers/crypto/vmx/aes.c7
-rw-r--r--drivers/crypto/vmx/aes_cbc.c7
-rw-r--r--drivers/crypto/vmx/aes_ctr.c5
-rw-r--r--drivers/crypto/vmx/aes_xts.c9
-rw-r--r--drivers/crypto/vmx/ghash.c213
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/hid/hid-core.c36
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c184
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h16
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c28
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h1
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c76
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h7
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c179
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h12
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c72
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c3
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c53
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h35
-rw-r--r--drivers/infiniband/hw/hfi1/init.c7
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c31
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h5
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c22
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c36
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c10
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h4
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c33
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c9
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c4
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c18
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c26
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c12
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c20
-rw-r--r--drivers/infiniband/hw/qedr/main.c25
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c16
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c19
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c26
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_cq.h10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c3
-rw-r--r--drivers/mmc/core/block.c2
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c178
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c117
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c200
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c32
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c139
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c10
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c9
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c33
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c355
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c481
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c326
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c476
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c63
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c180
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c113
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h269
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c510
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c1392
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c551
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c1042
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1108
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1025
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c88
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c450
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c541
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c751
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h106
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c794
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h28
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c70
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile3
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h102
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c76
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h29
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c1863
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c565
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c301
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c1617
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h125
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c55
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c217
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c35
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c406
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c190
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c186
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h72
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c14
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c43
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c264
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c298
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c44
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c8
-rw-r--r--drivers/net/phy/marvell.c6
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c19
-rw-r--r--drivers/nvme/host/core.c10
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c5
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c4
-rw-r--r--drivers/pci/pcie/aspm.c49
-rw-r--r--drivers/pci/quirks.c17
-rw-r--r--drivers/pci/switch/switchtec.c3
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/thunderx2_pmu.c861
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c3
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c10
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c158
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c51
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c473
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c101
-rw-r--r--drivers/scsi/libsas/sas_init.c42
-rw-r--r--drivers/scsi/libsas/sas_phy.c7
-rw-r--r--drivers/scsi/libsas/sas_port.c24
-rw-r--r--drivers/scsi/qedf/qedf_els.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c115
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h38
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c92
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c390
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c235
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c237
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/vhost/vhost.c6
-rw-r--r--drivers/video/fbdev/core/fbcmap.c2
-rw-r--r--drivers/video/fbdev/core/modedb.c3
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/btrfs/extent-tree.c44
-rw-r--r--fs/btrfs/file.c16
-rw-r--r--fs/btrfs/inode.c14
-rw-r--r--fs/btrfs/qgroup.c8
-rw-r--r--fs/btrfs/relocation.c27
-rw-r--r--fs/btrfs/tree-checker.c49
-rw-r--r--fs/btrfs/tree-log.c20
-rw-r--r--fs/configfs/dir.c14
-rw-r--r--fs/ext4/file.c7
-rw-r--r--fs/ext4/inode.c25
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/fs-writeback.c11
-rw-r--r--fs/fuse/dev.c12
-rw-r--r--fs/jbd2/journal.c4
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/ocfs2/export.c30
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/splice.c12
-rw-r--r--fs/sync.c15
-rw-r--r--fs/xfs/xfs_file.c27
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r8a7796-cpg-mssr.h2
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/bitops.h1
-rw-r--r--include/linux/bpf.h1
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/hid.h1
-rw-r--r--include/linux/indirect_call_wrapper.h51
-rw-r--r--include/linux/list_lru.h1
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/mm.h15
-rw-r--r--include/linux/net_dim.h1
-rw-r--r--include/linux/of.h4
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pipe_fs_i.h10
-rw-r--r--include/linux/qed/qed_if.h32
-rw-r--r--include/linux/qed/qed_rdma_if.h2
-rw-r--r--include/linux/qed/qede_rdma.h10
-rw-r--r--include/linux/sched/signal.h18
-rw-r--r--include/linux/sched/user.h7
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/rdma/iw_cm.h3
-rw-r--r--include/rdma/rdmavt_qp.h1
-rw-r--r--include/scsi/libsas.h13
-rw-r--r--include/uapi/linux/fs.h3
-rw-r--r--kernel/bpf/hashtab.c23
-rw-r--r--kernel/bpf/syscall.c5
-rw-r--r--kernel/fork.c31
-rw-r--r--kernel/livepatch/core.c91
-rw-r--r--kernel/ptrace.c15
-rw-r--r--kernel/signal.c61
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--lib/bitmap.c20
-rw-r--r--mm/gup.c55
-rw-r--r--mm/hugetlb.c13
-rw-r--r--mm/list_lru.c8
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/mempolicy.c32
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/core/dev.c15
-rw-r--r--net/ipv4/af_inet.c13
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/route.c30
-rw-r--r--net/ipv4/sysctl_net_ipv4.c5
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_offload.c6
-rw-r--r--net/ipv4/udp_offload.c15
-rw-r--r--net/ipv6/ip6_offload.c35
-rw-r--r--net/ipv6/tcpv6_offload.c7
-rw-r--r--net/ipv6/udp_offload.c7
-rw-r--r--net/rds/ib_fmr.c11
-rw-r--r--net/rds/ib_rdma.c3
-rw-r--r--net/rose/rose_loopback.c26
-rw-r--r--net/sctp/sm_sideeffect.c29
-rw-r--r--net/sctp/sm_statefuns.c35
-rw-r--r--net/tipc/name_table.c3
-rw-r--r--net/tls/tls_main.c19
-rw-r--r--net/xdp/xdp_umem.c1
-rw-r--r--security/keys/process_keys.c31
-rw-r--r--security/keys/request_key.c5
-rw-r--r--sound/pci/hda/patch_realtek.c25
-rw-r--r--sound/soc/codecs/cx2072x.c12
-rw-r--r--tools/objtool/check.c3
383 files changed, 21395 insertions, 6034 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 1afba2acce34..d6f2f75fa8e5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -393,8 +393,7 @@ Description: Information about CPU vulnerabilities
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
- Details about the l1tf file can be found in
- Documentation/admin-guide/l1tf.rst
+ See also: Documentation/admin-guide/hw-vuln/index.rst
What: /sys/devices/system/cpu/smt
/sys/devices/system/cpu/smt/active
diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
new file mode 100644
index 000000000000..1de29d28903d
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/mds.rst
@@ -0,0 +1,307 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+ - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+ ============== ===== ==============================================
+ CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling
+ CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling
+ CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling
+ ============== ===== ==============================================
+
+Problem
+-------
+
+When performing store, load, L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in the
+buffer can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget which
+allows in turn to infer the value via a cache side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious non
+privileged user space applications running on hosts or guests. Malicious
+guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation based vulnerabilities the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence the attacks are purely sampling based, but as demonstrated with
+the TLBleed attack samples can be postprocessed successfully.
+
+Web-Browsers
+^^^^^^^^^^^^
+
+ It's unclear whether attacks through Web-Browsers are possible at
+ all. The exploitation through Java-Script is considered very unlikely,
+ but other widely used web technologies like Webassembly could possibly be
+ abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+ ========================================= =================================
+ 'Not affected' The processor is not vulnerable
+
+ 'Vulnerable' The processor is vulnerable,
+ but no mitigation enabled
+
+ 'Vulnerable: Clear CPU buffers attempted' The processor is vulnerable but
+ microcode is not updated.
+ The mitigation is enabled on a
+ best effort basis.
+ See :ref:`vmwerv`
+
+ 'Mitigation: CPU buffer clear' The processor is vulnerable and the
+ CPU buffer clearing mitigation is
+ enabled.
+ ========================================= =================================
+
+If the processor is vulnerable then the following information is appended
+to the above information:
+
+ ======================== ============================================
+ 'SMT vulnerable' SMT is enabled
+ 'SMT mitigated' SMT is enabled and mitigated
+ 'SMT disabled' SMT is disabled
+ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown
+ ======================== ============================================
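+
+ For a quick check on a running system, the file can simply be read from a
+ shell; the reported string will be one of the values listed in the two
+ tables above, for example:
+
+   $ cat /sys/devices/system/cpu/vulnerabilities/mds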
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the processor is vulnerable, but the availability of the microcode based
+ mitigation mechanism is not advertised via CPUID the kernel selects a best
+ effort mitigation mode. This mode invokes the mitigation instructions
+ without a guarantee that they clear the CPU buffers.
+
+ This is done to address virtualization scenarios where the host has the
+ microcode update applied, but the hypervisor is not yet updated to expose
+ the CPUID to the guest. If the host has updated microcode, the protection
+ takes effect; otherwise a few CPU cycles are wasted pointlessly.
+
+ The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+-------------------------
+
+The kernel detects the affected CPUs and the presence of the microcode
+which is required.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+ The mitigation for MDS clears the affected CPU buffers on return to user
+ space and when entering a guest.
+
+ If SMT is enabled it also clears the buffers on idle entry when the CPU
+ is only affected by MSBDS and not any other MDS variant, because the
+ other variants cannot be protected against cross Hyper-Thread attacks.
+
+ For CPUs which are only affected by MSBDS the user space, guest and idle
+ transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection for host to guest transition depends on the L1TF
+ vulnerability of the CPU:
+
+ - CPU is affected by L1TF:
+
+ If the L1D flush mitigation is enabled and up to date microcode is
+ available, the L1D flush mitigation is automatically protecting the
+ guest transition.
+
+ If the L1D flush mitigation is disabled then the MDS mitigation is
+ invoked explicitly when the host MDS mitigation is enabled.
+
+ For details on L1TF and virtualization see:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+ - CPU is not affected by L1TF:
+
+ CPU buffers are flushed before entering the guest when the host MDS
+ mitigation is enabled.
+
+ The resulting MDS protection matrix for the host to guest transition:
+
+ ============ ===== ============= ============ =================
+ L1TF MDS VMX-L1FLUSH Host MDS MDS-State
+
+ Don't care No Don't care N/A Not affected
+
+ Yes Yes Disabled Off Vulnerable
+
+ Yes Yes Disabled Full Mitigated
+
+ Yes Yes Enabled Don't care Mitigated
+
+ No Yes N/A Off Vulnerable
+
+ No Yes N/A Full Mitigated
+ ============ ===== ============= ============ =================
+
+ This only covers the host to guest transition, i.e. prevents leakage from
+ host to guest, but does not protect the guest internally. Guests need to
+ have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The XEON PHI processor family is affected by MSBDS which can be exploited
+ cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+ to use MWAIT in user space (Ring 3), which opens a potential attack vector
+ for malicious user space. The exposure can be disabled on the kernel
+ command line with the 'ring3mwait=disable' command line option.
+
+ XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+ before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+ either, disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+ All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+ means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+ disable SMT for full protection. These are most of the affected CPUs; the
+ exception is XEON PHI, see :ref:`xeon_phi`.
+
+ Disabling SMT can have a significant performance impact, but the impact
+ depends on the type of workloads.
+
+ See the relevant chapter in the L1TF mitigation documentation for details:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows to control the MDS mitigations at boot
+time with the option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full If the CPU is vulnerable, enable all available mitigations
+ for the MDS vulnerability, CPU buffer clearing on exit to
+ userspace and when entering a VM. Idle transitions are
+ protected as well if SMT is enabled.
+
+ It does not automatically disable SMT.
+
+ off Disables MDS mitigations completely.
+
+ ============ =============================================================
+
+Not specifying this option is equivalent to "mds=full".
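+
+For example, to run with the mitigation explicitly disabled, "mds=off" is
+appended to the kernel command line. With a GRUB-based setup (shown only as
+an illustration; the exact syntax depends on the bootloader and distribution)
+this amounts to:
+
+   GRUB_CMDLINE_LINUX_DEFAULT="... mds=off"
+
+followed by regenerating the bootloader configuration and rebooting.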
+
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+ If all userspace applications are from a trusted source and do not
+ execute untrusted code which is supplied externally, then the mitigation
+ can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The same considerations as above versus trusted user space apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection depends on the state of the L1TF mitigations.
+ See :ref:`virt_mechanism`.
+
+ If the MDS mitigation is enabled and SMT is disabled, guest to host and
+ guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - Enable CPU buffer clearing
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted code. The same rationale as
+ for L1TF applies.
+ See :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <default_mitigations>`.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fa14b5876bdd..022b9811f3ea 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2254,6 +2254,8 @@
Not specifying this option is equivalent to
mds=full.
+ For details see: Documentation/admin-guide/hw-vuln/mds.rst
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
index cbad73a84ac4..3404a4a2dd86 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
@@ -22,6 +22,10 @@ Optional properties:
- enable-gpios : GPIO signal controlling enabling of BT. Active high.
- vio-supply : Vio input supply (1.8V)
- vbat-supply : Vbat input supply (2.9-4.8V)
+ - clocks : Must contain an entry, for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names : Must include the following entry:
+ "ext_clock" (External clock provided to the TI combo chip).
Example:
@@ -31,5 +35,7 @@ Example:
bluetooth {
compatible = "ti,wl1835-st";
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&clk32k_wl18xx>;
+ clock-names = "ext_clock";
};
};
diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
index 945934918b71..abd92d62d81c 100644
--- a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
@@ -19,7 +19,7 @@ Example:
rtc: rtc@01f00000 {
compatible = "allwinner,sun6i-a31-rtc";
- reg = <0x01f00000 0x54>;
+ reg = <0x01f00000 0x400>;
interrupts = <0 40 4>, <0 41 4>;
clock-output-names = "osc32k";
clocks = <&ext_osc32k>;
diff --git a/Documentation/devicetree/bindings/sound/cx2072x.txt b/Documentation/devicetree/bindings/sound/cx2072x.txt
deleted file mode 100644
index 05ddf7c0320e..000000000000
--- a/Documentation/devicetree/bindings/sound/cx2072x.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Conexant CX20721/CX20723/CX7601 audio CODEC
-
-The devices support I2C only.
-
-Required properties:
-
- - compatible : One of "cnxt,cx20721", "cnxt,cx20723", "cnxt,cx7601".
-
- - reg : the I2C address of the device for I2C, it should be <0x33>
-
-Optional properties:
-
- - clocks : phandle and clock specifier for codec MCLK.
- - clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-
-CODEC output pins:
- "PORTA" - Headphone
- "PORTG" - Class-D output
- "PORTE" - Line out
-
-CODEC output pins for Conexant DSP chip:
- "AEC REF" - AEC reference signal
-
-CODEC input pins:
- "PORTB" - Analog mic
- "PORTC" - Digital mic
- "PORTD" - Headset mic
-
-Example:
-
-codec: cx20721@33 {
- compatible = "cnxt,cx20721";
- reg = <0x33>;
- clocks = <&sco>;
- clock-names = "mclk";
-};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c78ffb8a0983..066c93708b83 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -399,6 +399,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
+ Possible values: 0 - 86400 (1 day)
Default: 300
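	For instance, the filter window can be inspected and changed at
	runtime through the usual sysctl interface; assuming the
	net/ipv4/sysctl_net_ipv4.c change contained in this merge enforces
	the documented bound, writes outside 0 - 86400 are rejected:

	  # sysctl net.ipv4.tcp_min_rtt_wlen
	  # sysctl -w net.ipv4.tcp_min_rtt_wlen=300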
tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/perf/thunderx2-pmu.txt b/Documentation/perf/thunderx2-pmu.txt
new file mode 100644
index 000000000000..dffc57143736
--- /dev/null
+++ b/Documentation/perf/thunderx2-pmu.txt
@@ -0,0 +1,41 @@
+Cavium ThunderX2 SoC Performance Monitoring Unit (PMU UNCORE)
+=============================================================
+
+The ThunderX2 SoC PMU consists of independent, system-wide, per-socket
+PMUs such as the Level 3 Cache (L3C) and DDR4 Memory Controller (DMC).
+
+The DMC has 8 interleaved channels and the L3C has 16 interleaved tiles.
+Events are counted for the default channel (i.e. channel 0) and prorated
+to the total number of channels/tiles.
+
+The DMC and L3C support up to 4 counters. Counters are independently
+programmable and can be started and stopped individually. Each counter
+can be set to a different event. Counters are 32-bit and do not support
+an overflow interrupt; they are read every 2 seconds.
+
+PMU UNCORE (perf) driver:
+
+The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
+L3C devices. Each PMU can be used to count up to 4 events
+simultaneously. The PMUs provide a description of their available events
+and configuration options under sysfs, see
+/sys/devices/uncore_<l3c_S/dmc_S/>; S is the socket id.
+
+The driver does not support sampling, therefore "perf record" will not
+work. Per-task perf sessions are also not supported.
+
+Examples:
+
+# perf stat -a -e uncore_dmc_0/cnt_cycles/ sleep 1
+
+# perf stat -a -e \
+uncore_dmc_0/cnt_cycles/,\
+uncore_dmc_0/data_transfers/,\
+uncore_dmc_0/read_txns/,\
+uncore_dmc_0/write_txns/ sleep 1
+
+# perf stat -a -e \
+uncore_l3c_0/read_request/,\
+uncore_l3c_0/read_hit/,\
+uncore_l3c_0/inv_request/,\
+uncore_l3c_0/inv_hit/ sleep 1
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec2b1df..116feb6b261e 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8de3b5b..d9612221e484 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a2399fd66e97..1e970873439c 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 30a33ff5f609..7fc5bdf85a57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -87,6 +87,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
{ "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -656,7 +657,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
case KVM_CAP_S390_GS:
r = -EINVAL;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(133)) {
set_kvm_facility(kvm->arch.model.fac_mask, 133);
@@ -1241,7 +1242,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
return -EINVAL;
mutex_lock(&kvm->lock);
- if (!atomic_read(&kvm->online_vcpus)) {
+ if (!kvm->created_vcpus) {
bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
KVM_S390_VM_CPU_FEAT_NR_BITS);
ret = 0;
@@ -2252,6 +2253,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d04a47dcb548..a6bb1b32ebc0 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -739,7 +739,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) {
- if (!(gpa & ~0x1fffU)) {
+ if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index da2f7e26cfa3..7419baf6997d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -38,7 +38,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_get_speculative(head)))
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
@@ -76,7 +77,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs))) {
*nr -= refs;
return 0;
}
@@ -150,7 +152,8 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs))) {
*nr -= refs;
return 0;
}
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d2c49023321f..50bd70a924d8 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -862,7 +862,7 @@ ENTRY(switch_to_thread_stack)
ret
END(switch_to_thread_stack)
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -882,6 +882,20 @@ ENTRY(\sym)
jnz .Lfrom_usermode_switch_stack_\@
.endif
+ .if \create_gap == 1
+ /*
+ * If coming from kernel space, create a 6-word gap to allow the
+ * int3 handler to emulate a call instruction.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_no_gap_\@
+ .rept 6
+ pushq 5*8(%rsp)
+ .endr
+ UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
+ .endif
+
.if \paranoid
call paranoid_entry
.else
@@ -1119,7 +1133,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
#endif /* CONFIG_HYPERV */
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0
+idtentry int3 do_int3 has_error_code=0 create_gap=1
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 90395063383c..ed9ee6defd10 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -37,4 +37,32 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+ /*
+ * The int3 handler in entry_64.S adds a gap between the
+ * stack where the break point happened, and the saving of
+ * pt_regs. We can extend the original stack because of
+ * this gap. See the idtentry macro's create_gap option.
+ */
+ regs->sp -= sizeof(unsigned long);
+ *(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+}
+#endif
+
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index cf25405444ab..415621ddb8a2 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -19,6 +19,8 @@
#include "cpu.h"
+#define APICID_SOCKET_ID_BIT 6
+
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ /* Socket ID is ApicId[6] for these processors. */
+ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+
cacheinfo_hygon_init_llc_id(c, cpu, node_id);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9bef1bbeba63..c98f757395a2 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -28,6 +28,7 @@
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
+#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -227,6 +228,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
static int update_ftrace_func(unsigned long ip, void *new)
{
@@ -255,6 +257,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned char *new;
int ret;
+ ftrace_update_func_call = (unsigned long)func;
+
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -290,13 +294,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
if (WARN_ON_ONCE(!regs))
return 0;
- ip = regs->ip - 1;
- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
- return 0;
+ ip = regs->ip - INT3_INSN_SIZE;
- regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+ if (ftrace_location(ip)) {
+ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+ return 1;
+ } else if (is_ftrace_caller(ip)) {
+ if (!ftrace_update_func_call) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+ int3_emulate_call(regs, ftrace_update_func_call);
+ return 1;
+ }
+#else
+ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+#endif
- return 1;
+ return 0;
}
static int ftrace_write(unsigned long ip, const char *val, int size)
@@ -867,6 +886,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
func = ftrace_ops_get_func(ops);
+ ftrace_update_func_call = (unsigned long)func;
+
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -963,6 +984,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;
+ ftrace_update_func_call = 0UL;
new = ftrace_jmp_replace(ip, (unsigned long)func);
return update_ftrace_func(ip, new);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 456dfdfd2249..7eeffee21ace 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -137,7 +137,10 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ put_dev_pagemap(pgmap);
+ break;
+ }
put_dev_pagemap(pgmap);
SetPageReferenced(page);
pages[*nr] = page;
@@ -173,9 +176,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
+ if (unlikely(!try_get_page(page))) {
+ put_dev_pagemap(pgmap);
+ return 0;
+ }
SetPageReferenced(page);
pages[*nr] = page;
- get_page(page);
put_dev_pagemap(pgmap);
(*nr)++;
pfn++;
@@ -219,6 +225,8 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
refs = 0;
head = pmd_page(pmd);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
@@ -282,6 +290,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
refs = 0;
head = pud_page(pud);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 9c67f39ecfff..6e01bd9d6907 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
- return ctx->dev->u_ctx;
+ return container_of(ctx->dev, struct uld_ctx, dev);
}
static inline int is_ofld_imm(const struct sk_buff *skb)
@@ -198,18 +198,36 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
-static inline void chcr_handle_aead_resp(struct aead_request *req,
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+ if (dev->state == CHCR_DETACH)
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+ atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
chcr_aead_common_exit(req);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
}
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
+
+ return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
@@ -391,7 +409,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
int j;
@@ -399,7 +417,7 @@ static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
return;
j = walk->nents;
walk->to->len[j % 8] = htons(size);
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
j++;
if ((j % 8) == 0)
walk->to++;
@@ -473,16 +491,16 @@ static inline void ulptx_walk_end(struct ulptx_walk *walk)
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
if (!size)
return;
if (walk->nents == 0) {
walk->sgl->len0 = cpu_to_be32(size);
- walk->sgl->addr0 = cpu_to_be64(*addr);
+ walk->sgl->addr0 = cpu_to_be64(addr);
} else {
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
walk->pair_idx = !walk->pair_idx;
if (!walk->pair_idx)
@@ -717,7 +735,7 @@ static inline void create_wreq(struct chcr_context *ctx,
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+ FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
!!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
@@ -773,7 +791,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1076,8 +1094,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
+ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
@@ -1100,6 +1118,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
@@ -1161,6 +1180,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
}
@@ -1187,7 +1207,10 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (err)
+ goto error;
if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
@@ -1276,15 +1299,21 @@ error:
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
+ goto error;
+ }
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1324,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return isfull ? -EBUSY : -EINPROGRESS;
+error:
+ chcr_dec_wrcount(dev);
+ return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
@@ -1311,8 +1348,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -ENOSPC;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
@@ -1324,7 +1361,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
static int chcr_device_init(struct chcr_context *ctx)
{
struct uld_ctx *u_ctx = NULL;
- struct adapter *adap;
unsigned int id;
int txq_perchan, txq_idx, ntxq;
int err = 0, rxq_perchan, rxq_idx;
@@ -1333,18 +1369,17 @@ static int chcr_device_init(struct chcr_context *ctx)
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
- ctx->dev = u_ctx->dev;
- adap = padap(ctx->dev);
+ ctx->dev = &u_ctx->dev;
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
- ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -1498,7 +1533,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1562,6 +1597,7 @@ static int chcr_ahash_update(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = NULL;
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
@@ -1570,12 +1606,6 @@ static int chcr_ahash_update(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1616,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+ /* Detach state for CHCR means lldi or padap is freed. Increasing
+ * the inflight count for dev guarantees that lldi and padap remain valid
+ */
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
+ }
+
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1629,6 +1676,8 @@ static int chcr_ahash_update(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1646,10 +1695,16 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ int error = -EINVAL;
+
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1686,19 +1741,25 @@ static int chcr_ahash_final(struct ahash_request *req)
}
params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+err:
+ chcr_dec_wrcount(dev);
+ return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1707,17 +1768,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1774,6 +1842,8 @@ static int chcr_ahash_finup(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1781,6 +1851,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1789,19 +1860,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1854,6 +1932,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1925,6 +2005,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
int digestsize, updated_digestsize;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
if (input == NULL)
goto out;
@@ -1967,6 +2048,7 @@ unmap:
out:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
}
@@ -1983,14 +2065,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ chcr_handle_cipher_resp(ablkcipher_request_cast(req),
input, err);
break;
-
case CRYPTO_ALG_TYPE_AHASH:
chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
@@ -2008,7 +2089,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
chcr_init_hctx_per_wr(state);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -2215,10 +2296,7 @@ static int chcr_aead_common_init(struct aead_request *req)
error = -ENOMEM;
goto err;
}
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
- CHCR_SRC_SG_SIZE, 0);
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
- CHCR_SRC_SG_SIZE, req->assoclen);
+
return 0;
err:
return error;
@@ -2249,7 +2327,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
@@ -2268,10 +2346,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
- unsigned int kctx_len = 0, dnents;
- unsigned int assoclen = req->assoclen;
+ unsigned int kctx_len = 0, dnents, snents;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -2288,24 +2366,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
- assoclen = 0;
- reqctx->aad_nents = 0;
}
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
dnents += MIN_AUTH_SG; // For IV
-
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
- + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+ : (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
@@ -2315,7 +2389,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2331,16 +2405,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
- assoclen + 1);
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1,
+ null ? 0 : 1 + IV,
+ null ? 0 : IV + req->assoclen,
+ req->assoclen + IV + 1,
(temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
temp & 0xF,
- null ? 0 : assoclen + IV + 1,
+ null ? 0 : req->assoclen + IV + 1,
temp, temp);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
@@ -2367,23 +2441,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
} else {
- memcpy(reqctx->iv, req->iv, IV);
+ memcpy(ivptr, req->iv, IV);
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb;
@@ -2470,8 +2545,7 @@ void chcr_aead_dma_unmap(struct device *dev,
}
void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen)
+ struct ulptx_sgl *ulptx)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2484,28 +2558,20 @@ void chcr_add_aead_src_ent(struct aead_request *req,
buf += reqctx->b0_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, assoclen, 0);
- buf += assoclen;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, req->cryptlen, req->assoclen);
+ buf, req->cryptlen + req->assoclen, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (reqctx->b0_len)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
- &reqctx->b0_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
- req->assoclen);
+ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
ulptx_walk_end(&ulp_walk);
}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2516,12 +2582,10 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_len)
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
@@ -2589,7 +2653,7 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->hctx_wr.dma_addr);
+ reqctx->hctx_wr.dma_addr);
ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
param->sg_len, reqctx->hctx_wr.src_ofst);
reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
@@ -2689,8 +2753,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
return 0;
}
-static void generate_b0(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+static int generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2701,7 +2764,7 @@ static void generate_b0(struct aead_request *req,
m = crypto_aead_authsize(aead);
- memcpy(b0, reqctx->iv, 16);
+ memcpy(b0, ivptr, 16);
lp = b0[0];
l = lp + 1;
@@ -2715,6 +2778,8 @@ static void generate_b0(struct aead_request *req,
rc = set_msg_len(b0 + 16 - l,
(op_type == CHCR_DECRYPT_OP) ?
req->cryptlen - m : req->cryptlen, l);
+
+ return rc;
}
static inline int crypto_ccm_check_iv(const u8 *iv)
@@ -2727,29 +2792,31 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
}
static int ccm_format_packet(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+ u8 *ivptr,
unsigned int sub_type,
unsigned short op_type,
unsigned int assoclen)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
int rc = 0;
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- reqctx->iv[0] = 3;
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
- memcpy(reqctx->iv + 4, req->iv, 8);
- memset(reqctx->iv + 12, 0, 4);
+ ivptr[0] = 3;
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+ memcpy(ivptr + 4, req->iv, 8);
+ memset(ivptr + 12, 0, 4);
} else {
- memcpy(reqctx->iv, req->iv, 16);
+ memcpy(ivptr, req->iv, 16);
}
if (assoclen)
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, aeadctx, op_type);
+ rc = generate_b0(req, ivptr, op_type);
/* zero the ctr value */
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
}
@@ -2762,7 +2829,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->tx_chan_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2775,7 +2842,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + IV + 1 + ccm_xtra) : 0;
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2785,13 +2852,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, assoclen + 1 + ccm_xtra);
+ 2, 1);
sec_cpl->pldlen =
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
/* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- 1, assoclen + ccm_xtra, assoclen
- + IV + 1 + ccm_xtra, 0);
+ 1 + IV, IV + assoclen + ccm_xtra,
+ req->assoclen + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2838,10 +2905,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2857,37 +2925,38 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
if (error)
goto err;
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+ snents += MIN_CCM_SG; //For B0
kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
reqctx->b0_len, 16) :
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
- MIN_CCM_SG) * 8);
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, reqctx->op)) {
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
@@ -2897,16 +2966,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
if (error)
goto dstmap_fail;
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
reqctx->b0_len) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
@@ -2931,10 +3001,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- unsigned int transhdr_len, dnents = 0;
+ unsigned int transhdr_len, dnents = 0, snents;
unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2946,19 +3017,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
- (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
@@ -2968,7 +3039,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2979,15 +3050,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->dev->rx_channel_id, 2,
- (assoclen + 1));
+ a_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + IV + req->cryptlen);
+ htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1, 0);
+ assoclen ? 1 + IV : 0,
+ assoclen ? IV + assoclen : 0,
+ req->assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
@@ -3002,25 +3073,26 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
- memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ memcpy(ivptr, aeadctx->salt, 4);
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+ *((unsigned int *)(ivptr + 12)) = htonl(0x01);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
@@ -3118,12 +3190,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_HW;
break;
case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_16:
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
@@ -3565,27 +3637,42 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
int isfull = 0;
+ struct chcr_dev *cdev;
- if (!a_ctx(tfm)->dev) {
+ cdev = a_ctx(tfm)->dev;
+ if (!cdev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
+
+ if (chcr_inc_wrcount(cdev)) {
+ /* Detach state for CHCR means lldi or padap is freed.
+ * We cannot increment fallback here.
+ */
+ return chcr_aead_fallback(req, reqctx->op);
+ }
+
u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
a_ctx(tfm)->tx_qidx)) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ chcr_dec_wrcount(cdev);
return -ENOSPC;
+ }
}
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
+ if (IS_ERR_OR_NULL(skb)) {
+ chcr_dec_wrcount(cdev);
+ return PTR_ERR_OR_ZERO(skb);
+ }
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
@@ -4178,7 +4265,6 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
-
};
/*
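The chcr_algo.c changes above share one structural theme: the IV is no longer kept in a separately DMA-mapped reqctx->iv buffer but is placed inline in the work request, between the destination DSGL and the source ULPTX SGL, and the source/destination scatterlists are now walked over assoclen + cryptlen in one go. A sketch of the WR layout implied by the pointer arithmetic in create_authenc_wr()/create_aead_ccm_wr()/create_gcm_wr() (descriptive only, field sizes as used in the diff):

/*
 * skb payload built by the create_*_wr() helpers:
 *
 *   struct chcr_wr             chcr_req   fixed header (sec_cpl, key_ctx hdr)
 *   key material               kctx_len   bytes, copied after the header
 *   struct cpl_rx_phys_dsgl    phys_cpl   plus dst_size bytes of dst DSGL
 *   u8 iv[IV]                  ivptr      16 bytes, filled per cipher mode
 *   struct ulptx_sgl           ulptx      source SGL, or immediate data
 *
 * hence "temp" now accounts for IV explicitly and chcr_add_aead_src_ent()/
 * chcr_add_aead_dst_ent() no longer take an assoclen argument.
 */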
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 1871500309e2..ee20dd899e83 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -262,7 +262,7 @@
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 2 /*IV+B0*/
+#define MIN_CCM_SG 1 /*IV+B0*/
#define CIP_SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
#define HASH_SPACE_LEFT(len) \
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2c472e3c6aeb..029a7354f541 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -26,10 +26,7 @@
#include "chcr_core.h"
#include "cxgb4_uld.h"
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_info = {
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
+static void detach_work_fn(struct work_struct *work)
+{
+ struct chcr_dev *dev;
+
+ dev = container_of(work, struct chcr_dev, detach_work.work);
+
+ if (atomic_read(&dev->inflight)) {
+ dev->wqretry--;
+ if (dev->wqretry) {
+ pr_debug("Request Inflight Count %d\n",
+ atomic_read(&dev->inflight));
+
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ } else {
+ WARN(1, "CHCR:%d request Still Pending\n",
+ atomic_read(&dev->inflight));
+ complete(&dev->detach_comp);
+ }
+ } else {
+ complete(&dev->detach_comp);
+ }
+}
+
struct uld_ctx *assign_chcr_device(void)
{
struct uld_ctx *u_ctx = NULL;
@@ -63,56 +83,74 @@ struct uld_ctx *assign_chcr_device(void)
* Although one session must use the same device to
* maintain request-response ordering.
*/
- mutex_lock(&dev_mutex);
- if (!list_empty(&uld_ctx_list)) {
- u_ctx = ctx_rr;
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ mutex_lock(&drv_data.drv_mutex);
+ if (!list_empty(&drv_data.act_dev)) {
+ u_ctx = drv_data.last_dev;
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- mutex_unlock(&dev_mutex);
+ mutex_unlock(&drv_data.drv_mutex);
return u_ctx;
}
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENXIO;
+ dev = &u_ctx->dev;
+ dev->state = CHCR_ATTACH;
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_move(&u_ctx->entry, &drv_data.act_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+ dev = &u_ctx->dev;
spin_lock_init(&dev->lock_chcr_dev);
- u_ctx->dev = dev;
- dev->u_ctx = u_ctx;
- atomic_inc(&dev_count);
- mutex_lock(&dev_mutex);
- list_add_tail(&u_ctx->entry, &uld_ctx_list);
- if (!ctx_rr)
- ctx_rr = u_ctx;
- mutex_unlock(&dev_mutex);
- return 0;
+ INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+ init_completion(&dev->detach_comp);
+ dev->state = CHCR_INIT;
+ dev->wqretry = WQ_RETRY;
+ atomic_inc(&drv_data.dev_count);
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
}
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- if (ctx_rr == u_ctx) {
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ struct adapter *adap;
+
+ mutex_lock(&drv_data.drv_mutex);
+ if (drv_data.last_dev == u_ctx) {
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- list_del(&u_ctx->entry);
- if (list_empty(&uld_ctx_list))
- ctx_rr = NULL;
- kfree(u_ctx->dev);
- u_ctx->dev = NULL;
- atomic_dec(&dev_count);
+ list_move(&u_ctx->entry, &drv_data.inact_dev);
+ if (list_empty(&drv_data.act_dev))
+ drv_data.last_dev = NULL;
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ atomic_dec(&drv_data.dev_count);
+ mutex_unlock(&drv_data.drv_mutex);
+
return 0;
}
@@ -131,12 +169,8 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
ack_err_status =
ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
- if (ack_err_status) {
- if (CHK_MAC_ERR_BIT(ack_err_status) ||
- CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EBADMSG;
- atomic_inc(&adap->chcr_stats.error);
- }
+ if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+ error_status = -EBADMSG;
/* call completion callback with failure status */
if (req) {
error_status = chcr_handle_resp(req, input, error_status);
@@ -144,6 +178,9 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
}
+ if (error_status)
+ atomic_inc(&adap->chcr_stats.error);
+
return 0;
}
@@ -167,6 +204,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+ chcr_dev_init(u_ctx);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
chcr_add_xfrmops(lld);
@@ -179,7 +217,7 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl)
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
- struct chcr_dev *dev = u_ctx->dev;
+ struct chcr_dev *dev = &u_ctx->dev;
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
if (rpl->opcode != CPL_FW6_PLD) {
@@ -201,6 +239,24 @@ int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev = &u_ctx->dev;
+
+ if (dev->state == CHCR_DETACH) {
+ pr_debug("Detached Event received for already detach device\n");
+ return;
+ }
+ dev->state = CHCR_DETACH;
+ if (atomic_read(&dev->inflight) != 0) {
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ wait_for_completion(&dev->detach_comp);
+ }
+
+ // Move u_ctx to the inact_dev list
+ chcr_dev_move(u_ctx);
+}
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
@@ -208,23 +264,16 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
switch (state) {
case CXGB4_STATE_UP:
- if (!u_ctx->dev) {
- ret = chcr_dev_add(u_ctx);
- if (ret != 0)
- return ret;
+ if (u_ctx->dev.state != CHCR_INIT) {
+ // Already initialised.
+ return 0;
}
- if (atomic_read(&dev_count) == 1)
- ret = start_crypto();
+ chcr_dev_add(u_ctx);
+ ret = start_crypto();
break;
case CXGB4_STATE_DETACH:
- if (u_ctx->dev) {
- mutex_lock(&dev_mutex);
- chcr_dev_remove(u_ctx);
- mutex_unlock(&dev_mutex);
- }
- if (!atomic_read(&dev_count))
- stop_crypto();
+ chcr_detach_device(u_ctx);
break;
case CXGB4_STATE_START_RECOVERY:
@@ -237,7 +286,13 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
+ INIT_LIST_HEAD(&drv_data.act_dev);
+ INIT_LIST_HEAD(&drv_data.inact_dev);
+ atomic_set(&drv_data.dev_count, 0);
+ mutex_init(&drv_data.drv_mutex);
+ drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+
return 0;
}
@@ -245,18 +300,20 @@ static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
- if (atomic_read(&dev_count))
- stop_crypto();
+ stop_crypto();
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
- if (u_ctx->dev)
- chcr_dev_remove(u_ctx);
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ list_del(&u_ctx->entry);
kfree(u_ctx);
}
- mutex_unlock(&dev_mutex);
- cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
}
module_init(chcr_crypto_init);
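The chcr_algo.c hunks above bracket every work request with chcr_inc_wrcount()/chcr_dec_wrcount(), and chcr_detach_device() waits for the inflight count to drain, but the helpers themselves do not appear in this excerpt. A minimal sketch of how such helpers could look, assuming they are static inlines keyed off the new dev->state and dev->inflight fields (hypothetical, for illustration only):

static inline int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)		/* device is being torn down */
		return 1;			/* callers bail out or fall back */
	atomic_inc(&dev->inflight);		/* pins lldi/padap until the response */
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);		/* lets detach_work_fn() complete */
}

With this pairing, the CXGB4_STATE_DETACH path only has to wait until inflight reaches zero before moving the device to the inactive list.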
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index de3a9c085daf..ad874d548aa5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -47,7 +47,7 @@
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
-
+#define WQ_DETACH_TM (msecs_to_jiffies(50))
#define PAD_ERROR_BIT 1
#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
@@ -61,9 +61,6 @@
#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
DUMMY_BYTES + \
sizeof(struct ulptx_sgl))
-
-#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
-
struct uld_ctx;
struct _key_ctx {
@@ -121,6 +118,20 @@ struct _key_ctx {
#define KEYCTX_TX_WR_AUTHIN_G(x) \
(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+#define WQ_RETRY 5
+struct chcr_driver_data {
+ struct list_head act_dev;
+ struct list_head inact_dev;
+ atomic_t dev_count;
+ struct mutex drv_mutex;
+ struct uld_ctx *last_dev;
+};
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
struct chcr_wr {
struct fw_crypto_lookaside_wr wreq;
struct ulp_txpkt ulptx;
@@ -131,15 +142,18 @@ struct chcr_wr {
struct chcr_dev {
spinlock_t lock_chcr_dev;
- struct uld_ctx *u_ctx;
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
unsigned char tx_channel_id;
- unsigned char rx_channel_id;
};
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
- struct chcr_dev *dev;
+ struct chcr_dev dev;
};
struct sge_opaque_hdr {
@@ -159,8 +173,17 @@ struct chcr_ipsec_wr {
struct chcr_ipsec_req req;
};
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
struct ipsec_sa_entry {
int hmac_ctrl;
+ u16 esn;
+ u16 resv;
unsigned int enckey_len;
unsigned int kctx_len;
unsigned int authsize;
@@ -181,6 +204,13 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static inline void *padap(struct chcr_dev *dev)
+{
+ struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
+
+ return pci_get_drvdata(u_ctx->lldi.pdev);
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d37ef41f9ebe..655606f2e4d0 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -41,7 +41,8 @@
#define CCM_B0_SIZE 16
#define CCM_AAD_FIELD_SIZE 2
-#define T6_MAX_AAD_SIZE 511
+// 511 - 16 (for IV)
+#define T6_MAX_AAD_SIZE 495
/* Define following if h/w is not dropping the AAD and IV data before
@@ -185,9 +186,6 @@ struct chcr_aead_reqctx {
dma_addr_t b0_dma;
unsigned int b0_len;
unsigned int op;
- short int aad_nents;
- short int src_nents;
- short int dst_nents;
u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
@@ -322,10 +320,8 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
unsigned short op_type);
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid);
-void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
- unsigned int assoclen);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 461b97e2f1fd..55be4f02a76a 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
.xdo_dev_state_add = chcr_xfrm_add_state,
.xdo_dev_state_delete = chcr_xfrm_del_state,
.xdo_dev_state_free = chcr_xfrm_free_state,
.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
@@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
pr_debug("CHCR: Cannot offload compressed xfrm states\n");
return -EINVAL;
}
- if (x->props.flags & XFRM_STATE_ESN) {
- pr_debug("CHCR: Cannot offload ESN xfrm states\n");
- return -EINVAL;
- }
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
@@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
}
sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ if (x->props.flags & XFRM_STATE_ESN)
+ sa_entry->esn = 1;
chcr_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
try_module_get(THIS_MODULE);
@@ -294,36 +294,71 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
+ if (x->props.family == AF_INET) {
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ /* Inline single pdu */
+ if (skb_shinfo(skb)->gso_size)
return false;
-
return true;
}
-static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+static void chcr_advance_esn_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
{
- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+ unsigned int kctx_len;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
hdrlen += sizeof(struct cpl_tx_pkt);
+ if (sa_entry->esn)
+ hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
+ << 4);
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
}
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- unsigned int kctx_len)
+ struct ipsec_sa_entry *sa_entry,
+ bool *immediate)
{
+ unsigned int kctx_len;
unsigned int flits;
- int hdrlen = is_eth_imm(skb, kctx_len);
+ int aadivlen;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = is_eth_imm(skb, sa_entry);
+ aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ 16) : 0;
+ aadivlen <<= 4;
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
* TX Packet header plus the skb data in the Work Request.
*/
- if (hdrlen)
+ if (hdrlen) {
+ *immediate = true;
return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+ }
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
@@ -338,13 +373,69 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
flits += (sizeof(struct fw_ulptx_wr) +
sizeof(struct chcr_ipsec_req) +
kctx_len +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ sizeof(struct cpl_tx_pkt_core) +
+ aadivlen) / sizeof(__be64);
return flits;
}
+inline void *copy_esn_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct chcr_ipsec_aadiv *aadiv;
+ struct ulptx_idata *sc_imm;
+ struct ip_esp_hdr *esphdr;
+ struct xfrm_offload *xo;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ __be64 seqno;
+ u32 qidx;
+ u32 seqlo;
+ u8 *iv;
+ int eoq;
+ int len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ if (!eoq)
+ pos = q->q.desc;
+
+ len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
+ memset(pos, 0, len);
+ aadiv = (struct chcr_ipsec_aadiv *)pos;
+ esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ xo = xfrm_offload(skb);
+
+ aadiv->spi = (esphdr->spi);
+ seqlo = htonl(esphdr->seq_no);
+ seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
+ memcpy(aadiv->seq_no, &seqno, 8);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ memcpy(aadiv->iv, iv, 8);
+
+ if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+ sc_imm->cmd_more = FILL_CMD_MORE(0);
+ sc_imm->len = cpu_to_be32(skb->len);
+ }
+ pos += len;
+ return pos;
+}
+
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
- struct net_device *dev,
- void *pos)
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
{
struct cpl_tx_pkt_core *cpl;
struct sge_eth_txq *q;
@@ -379,6 +470,9 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
cpl->ctrl1 = cpu_to_be64(cntrl);
pos += sizeof(struct cpl_tx_pkt_core);
+ /* Copy ESN info for HW */
+ if (sa_entry->esn)
+ pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
return pos;
}
@@ -425,7 +519,7 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
pos = (u8 *)q->q.desc + (key_len - left);
}
/* Copy CPL TX PKT XT */
- pos = copy_cpltx_pktxt(skb, dev, pos);
+ pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
return pos;
}
@@ -438,11 +532,20 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- unsigned int immdatalen = 0;
unsigned int ivsize = GCM_ESP_IV_SIZE;
struct chcr_ipsec_wr *wr;
+ bool immediate = false;
+ u16 immdatalen = 0;
unsigned int flits;
+ u32 ivinoffset;
+ u32 aadstart;
+ u32 aadstop;
+ u32 ciphstart;
+ u16 sc_more = 0;
+ u32 ivdrop = 0;
+ u32 esnlen = 0;
u32 wr_mid;
+ u16 ndesc;
int qidx = skb_get_queue_mapping(skb);
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
unsigned int kctx_len = sa_entry->kctx_len;
@@ -450,61 +553,76 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
atomic_inc(&adap->chcr_stats.ipsec_cnt);
- flits = calc_tx_sec_flits(skb, kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+ ndesc = DIV_ROUND_UP(flits, 2);
+ if (sa_entry->esn)
+ ivdrop = 1;
- if (is_eth_imm(skb, kctx_len))
+ if (immediate)
immdatalen = skb->len;
+ if (sa_entry->esn) {
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
+ if (!skb_is_nonlinear(skb))
+ sc_more = 1;
+ }
+
/* WR Header */
wr = (struct chcr_ipsec_wr *)pos;
wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
- wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
q->q.stops++;
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr_mid |= FW_ULPTX_WR_DATA_F;
wr->wreq.flowid_len16 = htonl(wr_mid);
/* ULPTX */
wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
- wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+ wr->req.ulptx.len = htonl(ndesc - 1);
/* Sub-command */
- wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(wr->req.key_ctx) +
kctx_len +
sizeof(struct cpl_tx_pkt_core) +
- immdatalen);
+ esnlen +
+ (esnlen ? 0 : immdatalen));
/* CPL_SEC_PDU */
+ ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1);
wr->req.sec_cpl.op_ivinsrtofst = htonl(
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(2) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) + 1)));
+ ivinoffset));
- wr->req.sec_cpl.pldlen = htonl(skb->len);
+ wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
+ aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
+ aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr));
+ ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1;
+ ciphstart += sa_entry->esn ? esnlen : 0;
wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- (skb_transport_offset(skb) + 1),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr)),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1), 0);
+ aadstart,
+ aadstop,
+ ciphstart, 0);
wr->req.sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1,
- sa_entry->authsize,
- sa_entry->authsize);
+ FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
+ sa_entry->authsize,
+ sa_entry->authsize);
wr->req.sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -512,7 +630,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
sa_entry->hmac_ctrl,
ivsize >> 1);
wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 0, 0);
+ 0, ivdrop, 0);
pos += sizeof(struct fw_ulptx_wr) +
sizeof(struct ulp_txpkt) +
@@ -565,7 +683,7 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
int qidx, left, credits;
- unsigned int flits = 0, ndesc, kctx_len;
+ unsigned int flits = 0, ndesc;
struct adapter *adap;
struct sge_eth_txq *q;
struct port_info *pi;
@@ -576,7 +694,6 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
- kctx_len = sa_entry->kctx_len;
if (skb->sp->len != 1) {
out_free: dev_kfree_skb_any(skb);
@@ -590,7 +707,7 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -603,9 +720,6 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb, kctx_len))
- immediate = true;
-
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 3c569bf286fe..a15bb4ba75ce 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -23,6 +23,7 @@
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/dst.h>
+#include <net/tls.h>
#include "chtls.h"
#include "chtls_cm.h"
@@ -614,7 +615,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
pi = netdev_priv(ndev);
adap = pi->adapter;
- if (!(adap->flags & FULL_INIT_DONE))
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EBADF;
if (listen_hash_find(cdev, sk) >= 0) /* already have it */
@@ -1013,6 +1014,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
{
struct inet_sock *newinet;
const struct iphdr *iph;
+ struct tls_context *ctx;
struct net_device *ndev;
struct chtls_sock *csk;
struct dst_entry *dst;
@@ -1061,6 +1063,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
sk_setup_caps(newsk, dst);
+ ctx = tls_get_ctx(lsk);
+ newsk->sk_destruct = ctx->sk_destruct;
csk->sk = newsk;
csk->passive_reap_next = oreq;
csk->tx_chan = cxgb4_port_chan(ndev);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 8b49394ad42f..6b0740acfd1d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int sndbuf, err = 0;
+ int err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
- sndbuf = cdev->max_host_sndbuf;
if (csk_mem_free(cdev, sk)) {
current_timeo = (prandom_u32() % (HZ / 5)) + 2;
vm_wait = (prandom_u32() % (HZ / 5)) + 2;
@@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
- struct net_device *dev = csk->egress_dev;
struct chtls_hws *hws = &csk->tlshws;
struct tcp_sock *tp = tcp_sk(sk);
- struct adapter *adap;
unsigned long avail;
int buffers_freed;
int copied = 0;
- int request;
int target;
long timeo;
- adap = netdev2adap(dev);
buffers_freed = 0;
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
@@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
- struct chtls_hws *hws;
unsigned long avail; /* amount of available data in current skb */
int buffers_freed;
int copied = 0;
- int request;
long timeo;
int target; /* Read at least this many bytes */
@@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
- hws = &csk->tlshws;
if (is_tls_rx(csk))
return chtls_pt_recvmsg(sk, msg, len, nonblock,
@@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
timeo = sock_rcvtimeo(sk, nonblock);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
- request = len;
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
chtls_cleanup_rbuf(sk, copied);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 563f8fe7686a..dd2daf2a54e0 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -30,7 +30,6 @@
*/
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
-static DEFINE_MUTEX(cdev_list_lock);
static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index b0cd5aff3822..5e85dfca8242 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -83,13 +83,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
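The vmx setkey changes in aes.c and the three files that follow stop accumulating sub-call return codes with "+=" and instead OR them together and normalize to -EINVAL. A tiny illustration of why summing return codes is unsafe (values are made up for the example, not taken from the driver):

	int a = -2;          /* first set_key call fails                      */
	int b =  2;          /* second one fails with a positive code         */
	int summed = a + b;  /* 0 -> the combined failure looks like success  */
	int ored   = a | b;  /* non-zero -> the failure remains visible       */

Mapping any non-zero accumulation to a single -EINVAL then gives callers a well-formed errno instead of an arbitrary sum.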
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 668e285f1a64..bb01e62700af 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -86,13 +86,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 17f89cd983c4..7ff9c20f6043 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -88,8 +88,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 16f6c0cef4ac..f9c224192802 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -91,14 +91,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
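The four vmx setkey hunks above stop summing sub-step return codes (ret +=) and instead OR them together, collapsing any failure into -EINVAL; summing error codes can overflow or cancel out, while OR-ing only answers whether anything failed. A minimal userspace sketch of that pattern, with made-up step functions standing in for the real aes_p8_set_*_key()/crypto_*_setkey() calls:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the individual setkey steps (not kernel APIs). */
static int step_a(void) { return 0; }
static int step_b(void) { return -22; }	/* pretend one step failed */

static int setkey_like(void)
{
	int ret = 0;

	/* OR the results: any nonzero step makes ret nonzero. */
	ret |= step_a();
	ret |= step_b();

	return ret ? -EINVAL : 0;
}

int main(void)
{
	printf("setkey_like() = %d\n", setkey_like());
	return 0;
}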
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..1bfe867c0b7b 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* GHASH routines supporting VMX instructions on the Power 8
*
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright (C) 2015, 2019 International Business Machines Inc.
*
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ *
+ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
+ * mechanism. The new approach is based on arm64 code, which is:
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <linux/types.h>
@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
const u8 *in, size_t len);
struct p8_ghash_ctx {
+ /* key used by vector asm */
u128 htable[16];
- struct crypto_shash *fallback;
+ /* key used by software fallback */
+ be128 key;
};
struct p8_ghash_desc_ctx {
u64 shash[2];
u8 buffer[GHASH_DIGEST_SIZE];
int bytes;
- struct shash_desc fallback_desc;
};
-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
-{
- const char *alg = "ghash-generic";
- struct crypto_shash *fallback;
- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_shash_set_flags(fallback,
- crypto_shash_get_flags((struct crypto_shash
- *) tfm));
-
- /* Check if the descsize defined in the algorithm is still enough. */
- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
- + crypto_shash_descsize(fallback)) {
- printk(KERN_ERR
- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
- alg,
- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
- crypto_shash_descsize(fallback));
- return -EINVAL;
- }
- ctx->fallback = fallback;
-
- return 0;
-}
-
-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback) {
- crypto_free_shash(ctx->fallback);
- ctx->fallback = NULL;
- }
-}
-
static int p8_ghash_init(struct shash_desc *desc)
{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
- dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
- return crypto_shash_init(&dctx->fallback_desc);
+ return 0;
}
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- return crypto_shash_setkey(ctx->fallback, key, keylen);
+
+ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static inline void __ghash_block(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ }
+}
+
+static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ src, srclen);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ srclen -= GHASH_BLOCK_SIZE;
+ src += GHASH_BLOCK_SIZE;
+ }
+ }
}
static int p8_ghash_update(struct shash_desc *desc,
@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_update(&dctx->fallback_desc, src,
- srclen);
- } else {
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
+ srclen);
+ dctx->bytes += srclen;
+ return 0;
}
- return 0;
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+
+ __ghash_block(ctx, dctx);
+
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
+ __ghash_blocks(ctx, dctx, src, len);
+ src += len;
+ srclen -= len;
}
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
}
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_final(&dctx->fallback_desc, out);
- } else {
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- dctx->bytes = 0;
- }
- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
- return 0;
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
+ __ghash_block(ctx, dctx);
+ dctx->bytes = 0;
}
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
}
struct shash_alg p8_ghash_alg = {
@@ -215,11 +178,9 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
- .cra_init = p8_ghash_init_tfm,
- .cra_exit = p8_ghash_exit_tfm,
},
};
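The ghash.c rewrite above removes the shash fallback and keeps only partial-block buffering plus a per-block primitive: gcm_ghash_p8() when VSX is usable and gf128mul_lle() when running in interrupt context. Below is a standalone sketch of just the buffering flow in the new p8_ghash_update(), with a block counter standing in for the real per-block GHASH math; names and types are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK 16

struct gctx {
	uint8_t buf[BLK];	/* partial block carried between calls */
	size_t bytes;		/* valid bytes currently buffered */
	unsigned long blocks;	/* stand-in for the per-block GHASH work */
};

/* Stand-in for gcm_ghash_p8()/gf128mul_lle(): just count blocks. */
static void consume_block(struct gctx *c, const uint8_t *blk)
{
	(void)blk;
	c->blocks++;
}

/* Top up a partial block first, then run whole blocks, then stash the tail. */
static void ghash_update_like(struct gctx *c, const uint8_t *src, size_t len)
{
	if (c->bytes) {
		size_t fill = BLK - c->bytes;

		if (len < fill) {
			memcpy(c->buf + c->bytes, src, len);
			c->bytes += len;
			return;
		}
		memcpy(c->buf + c->bytes, src, fill);
		consume_block(c, c->buf);
		src += fill;
		len -= fill;
		c->bytes = 0;
	}
	while (len >= BLK) {
		consume_block(c, src);
		src += BLK;
		len -= BLK;
	}
	if (len) {
		memcpy(c->buf, src, len);
		c->bytes = len;
	}
}

int main(void)
{
	struct gctx c = { { 0 }, 0, 0 };
	uint8_t data[40] = { 0 };

	ghash_update_like(&c, data, 7);
	ghash_update_like(&c, data, 33);
	printf("blocks=%lu buffered=%zu\n", c.blocks, c.bytes);	/* 2, 8 */
	return 0;
}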
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index a4915099aaa9..a0e107abc40d 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
* the last one found one as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+ if (!fwstr)
+ return ERR_PTR(-ENOMEM);
edidstr = fwstr;
while ((edidname = strsep(&edidstr, ","))) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 6f6e18a06818..9d839b4fd8f7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
return;
etnaviv_dump_core = false;
+ mutex_lock(&gpu->mmu->lock);
+
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
+ mutex_unlock(&gpu->mmu->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj->base.size);
}
+ mutex_unlock(&gpu->mmu->lock);
+
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c725af28c6e5..798fd7aaf984 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -121,12 +121,13 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
+ {RCS, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
+ {RCS, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
+ {RCS, TRNULLDETCT, 0, true}, /* 0x4de8 */
+ {RCS, TRINVTILEDETCT, 0, true}, /* 0x4dec */
+ {RCS, TRVADR, 0, true}, /* 0x4df0 */
+ {RCS, TRTTE, 0, true}, /* 0x4df4 */
+ {RCS, _MMIO(0x4dfc), 0, true},
{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c89bc783fe48..3834aa71c9c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2733,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (view_type == vmw_view_max)
return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
+ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+ DRM_ERROR("Invalid surface id.\n");
+ return -EINVAL;
+ }
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf_node);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c6ddf1af4b2a..5b91ec967a73 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -199,13 +199,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
* Add a usage to the temporary parser table.
*/
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+ parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -462,10 +463,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
- return hid_add_usage(parser, data);
+ return hid_add_usage(parser, data, item->size);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
@@ -474,9 +472,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
parser->local.usage_minimum = data;
return 0;
@@ -487,9 +482,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
count = data - parser->local.usage_minimum;
if (count + parser->local.usage_index >= HID_MAX_USAGES) {
/*
@@ -509,7 +501,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
for (n = parser->local.usage_minimum; n <= data; n++)
- if (hid_add_usage(parser, n)) {
+ if (hid_add_usage(parser, n, item->size)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
@@ -524,6 +516,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
/*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_usage_page(struct hid_parser *parser)
+{
+ int i;
+
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage_size[i] <= 2)
+ parser->local.usage[i] += parser->global.usage_page << 16;
+}
+
+/*
* Process a main item.
*/
@@ -532,6 +540,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -741,6 +751,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
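The hid-core.c change above defers the Usage Page concatenation described in the HID specification, section 6.2.2.8, until a Main item is reached: each Usage item is stored together with its encoded size, and only 8- and 16-bit usages later receive the current Usage Page in their upper 16 bits. A self-contained sketch of that bookkeeping (the struct and limits are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_USAGES 8

struct local_state {
	uint32_t usage[MAX_USAGES];
	uint8_t usage_size[MAX_USAGES];
	int usage_index;
};

static int add_usage(struct local_state *l, uint32_t data, uint8_t size)
{
	if (l->usage_index >= MAX_USAGES)
		return -1;
	l->usage[l->usage_index] = data;
	l->usage_size[l->usage_index] = size;
	l->usage_index++;
	return 0;
}

/* Deferred concatenation, as in the new hid_concatenate_usage_page(). */
static void concatenate_usage_page(struct local_state *l, uint16_t usage_page)
{
	int i;

	for (i = 0; i < l->usage_index; i++)
		if (l->usage_size[i] <= 2)
			l->usage[i] += (uint32_t)usage_page << 16;
}

int main(void)
{
	struct local_state l;

	memset(&l, 0, sizeof(l));
	add_usage(&l, 0x0030, 2);	/* 16-bit usage: needs the page   */
	add_usage(&l, 0x000c0040, 4);	/* 32-bit usage: page already set */
	concatenate_usage_page(&l, 0x0001);	/* Generic Desktop page */

	/* Prints 0x00010030 0x000c0040 */
	printf("0x%08x 0x%08x\n", (unsigned)l.usage[0], (unsigned)l.usage[1]);
	return 0;
}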
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 0709980ad69b..843fbbb37db2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2407,6 +2407,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
return PTR_ERR(id);
id->tos = id_priv->tos;
+ id->tos_set = id_priv->tos_set;
id_priv->cm_id.iw = id;
memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -3795,6 +3796,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
return PTR_ERR(cm_id);
cm_id->tos = id_priv->tos;
+ cm_id->tos_set = id_priv->tos_set;
id_priv->cm_id.iw = cm_id;
memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2fe0d621ae2c..0fe9d3452e96 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -549,7 +549,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = mhp->umem->page_shift;
- n = mhp->umem->nmap;
+ n = ib_umem_num_pages(mhp->umem);
err = iwch_alloc_pbl(mhp, n);
if (err)
@@ -1389,7 +1389,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.get_port_immutable = iwch_port_immutable;
dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 39215352e377..9e45e613e758 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -459,6 +459,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb_reset_transport_header(skb);
} else {
skb = alloc_skb(len, gfp);
+ if (!skb)
+ return NULL;
}
t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
@@ -657,7 +659,33 @@ static int send_halfclose(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static int send_abort(struct c4iw_ep *ep)
+static void read_tcb(struct c4iw_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_get_tcb *req;
+ int wrlen = roundup(sizeof(*req), 16);
+
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (WARN_ON(!skb))
+ return;
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+ req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
+ req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
+
+ /*
+ * keep a ref on the ep so the tcb is not unlocked before this
+ * cpl completes. The ref is released in read_tcb_rpl().
+ */
+ c4iw_get_ep(&ep->com);
+ if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
+ c4iw_put_ep(&ep->com);
+}
+
+static int send_abort_req(struct c4iw_ep *ep)
{
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
@@ -672,6 +700,17 @@ static int send_abort(struct c4iw_ep *ep)
return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
+static int send_abort(struct c4iw_ep *ep)
+{
+ if (!ep->com.qp || !ep->com.qp->srq) {
+ send_abort_req(ep);
+ return 0;
+ }
+ set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ return 0;
+}
+
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req = NULL;
@@ -1853,14 +1892,11 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
-static void complete_cached_srq_buffers(struct c4iw_ep *ep,
- __be32 srqidx_status)
+static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
{
enum chip_type adapter_type;
- u32 srqidx;
adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status));
/*
* If this TCB had a srq buffer cached, then we must complete
@@ -1878,6 +1914,7 @@ static void complete_cached_srq_buffers(struct c4iw_ep *ep,
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
+ u32 srqidx;
struct c4iw_ep *ep;
struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
int release = 0;
@@ -1889,7 +1926,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
- complete_cached_srq_buffers(ep, rpl->srqidx_status);
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
+ complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
+ }
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
@@ -1905,8 +1945,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- if (release)
+ if (release) {
+ close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
+ }
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2074,7 +2116,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
@@ -2164,7 +2206,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port,
+ ep->com.cm_id->tos,
raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -2479,7 +2522,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
- u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ u8 tos;
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
@@ -2493,6 +2536,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ if (parent_ep->com.cm_id->tos_set)
+ tos = parent_ep->com.cm_id->tos;
+ else
+ tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+
cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
&iptype, local_ip, peer_ip, &local_port, &peer_port);
@@ -2512,7 +2560,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ tos,
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@@ -2743,6 +2791,21 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
+{
+ complete_cached_srq_buffers(ep, ep->srqe_idx);
+ if (ep->com.cm_id && ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ peer_abort_upcall(ep);
+ release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
+}
+
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss6 *req = cplhdr(skb);
@@ -2753,6 +2816,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
int release = 0;
unsigned int tid = GET_TID(req);
u8 status;
+ u32 srqidx;
u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
@@ -2772,8 +2836,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
goto deref_ep;
}
- complete_cached_srq_buffers(ep, req->srqidx_status);
-
pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -2821,6 +2883,23 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
stop_ep_timer(ep);
/*FALLTHROUGH*/
case FPDU_MODE:
+ if (ep->com.qp && ep->com.qp->srq) {
+ srqidx = ABORT_RSS_SRQIDX_G(
+ be32_to_cpu(req->srqidx_status));
+ if (srqidx) {
+ complete_cached_srq_buffers(ep,
+ req->srqidx_status);
+ } else {
+ /* Hold ep ref until finish_peer_abort() */
+ c4iw_get_ep(&ep->com);
+ __state_set(&ep->com, ABORTING);
+ set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
+ read_tcb(ep);
+ break;
+
+ }
+ }
+
if (ep->com.cm_id && ep->com.qp) {
attrs.next_state = C4IW_QP_STATE_ERROR;
ret = c4iw_modify_qp(ep->com.qp->rhp,
@@ -3320,7 +3399,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port, cm_id->tos,
raddr6->sin6_scope_id);
}
if (!ep->dst) {
@@ -3608,7 +3687,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
@@ -3719,6 +3797,80 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
return;
}
+static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
+{
+ u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
+ u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
+ u64 t;
+ u32 shift = 32;
+
+ t = (thi << shift) | (tlo >> shift);
+
+ return t;
+}
+
+static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
+{
+ u32 v;
+ u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
+
+ if (word & 0x1)
+ shift += 32;
+ v = (t >> shift) & mask;
+ return v;
+}
+
+static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
+ __be64 *tcb = (__be64 *)(rpl + 1);
+ unsigned int tid = GET_TID(rpl);
+ struct c4iw_ep *ep;
+ u64 t_flags_64;
+ u32 rx_pdu_out;
+
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+ /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
+ * determine if there's a rx PDU feedback event pending.
+ *
+ * If that bit is set, it means we'll need to re-read the TCB's
+ * rq_start value. The final value is the one present in a TCB
+ * with the TF_RX_PDU_OUT bit cleared.
+ */
+
+ t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
+ rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
+
+ c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
+ c4iw_put_ep(&ep->com); /* from read_tcb() */
+
+ /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
+ if (rx_pdu_out) {
+ if (++ep->rx_pdu_out_cnt >= 2) {
+ WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
+ goto cleanup;
+ }
+ read_tcb(ep);
+ return 0;
+ }
+
+ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
+ TCB_RQ_START_S);
+cleanup:
+ pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
+
+ if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
+ finish_peer_abort(dev, ep);
+ else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
+ send_abort_req(ep);
+ else
+ WARN_ONCE(1, "unexpected state!");
+
+ return 0;
+}
+
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_fw6_msg *rpl = cplhdr(skb);
@@ -4039,6 +4191,7 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
[CPL_CLOSE_CON_RPL] = close_con_rpl,
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
+ [CPL_GET_TCB_RPL] = read_tcb_rpl,
[CPL_FW6_MSG] = deferred_fw6_msg,
[CPL_RX_PKT] = rx_pkt,
[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
@@ -4270,6 +4423,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_GET_TCB_RPL] = sched,
[CPL_FW6_MSG] = fw6_msg,
[CPL_RX_PKT] = sched
};
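The new t4_tcb_get_field32()/t4_tcb_get_field64() helpers above treat the 32-word TCB image returned by CPL_GET_TCB_RPL as sixteen big-endian 64-bit slots: word W lives in slot (31 - W) / 2, and odd-numbered words occupy the upper half of their slot. A portable userspace sketch of that arithmetic; the word number and value in main() are invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Portable big-endian 64-bit load (stand-in for be64_to_cpu()). */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Mirrors the slot/shift selection done by t4_tcb_get_field32(). */
static uint32_t tcb_get_field32(const uint8_t *tcb, uint16_t word,
				uint32_t mask, uint32_t shift)
{
	uint64_t t = load_be64(tcb + 8 * ((31 - word) / 2));

	if (word & 0x1)
		shift += 32;
	return (uint32_t)((t >> shift) & mask);
}

int main(void)
{
	uint8_t tcb[16 * 8] = { 0 };

	/* Plant a value in TCB word 16: even word, so it sits in the low
	 * half of slot (31 - 16) / 2 = 7, i.e. the last four bytes of
	 * that big-endian 8-byte group. */
	tcb[7 * 8 + 4] = 0xde;
	tcb[7 * 8 + 5] = 0xad;
	tcb[7 * 8 + 6] = 0xbe;
	tcb[7 * 8 + 7] = 0xef;

	printf("word 16 = 0x%08x\n",
	       (unsigned)tcb_get_field32(tcb, 16, 0xffffffffu, 0));	/* deadbeef */
	return 0;
}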
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 622a03e9bc30..53cc1d7adda4 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -590,7 +590,6 @@ struct c4iw_ucontext {
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
- struct kref kref;
bool is_32b_cqe;
};
@@ -599,18 +598,6 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
-void _c4iw_free_ucontext(struct kref *kref);
-
-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
-{
- kref_put(&ucontext->kref, _c4iw_free_ucontext);
-}
-
-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
-{
- kref_get(&ucontext->kref);
-}
-
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
@@ -983,6 +970,9 @@ struct c4iw_ep {
int rcv_win;
u32 snd_wscale;
struct c4iw_ep_stats stats;
+ u32 srqe_idx;
+ u32 rx_pdu_out_cnt;
+ struct sk_buff *peer_abort_skb;
};
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7b76e6f81aeb..864d0facf3cc 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = mhp->umem->page_shift;
- n = mhp->umem->nmap;
+ n = ib_umem_num_pages(mhp->umem);
err = alloc_pbl(mhp, n);
if (err)
goto err_umem_release;
@@ -684,8 +684,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
c4iw_put_wr_wait(mhp->wr_waitp);
- kfree(mhp);
pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index cbb3c0ddd990..46ec1ff19733 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,28 +58,19 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-void _c4iw_free_ucontext(struct kref *kref)
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
- struct c4iw_ucontext *ucontext;
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- ucontext = container_of(kref, struct c4iw_ucontext, kref);
+ pr_debug("context %p\n", context);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
- pr_debug("ucontext %p\n", ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
-}
-
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
-
- pr_debug("context %p\n", context);
- c4iw_put_ucontext(ucontext);
return 0;
}
@@ -102,7 +93,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
- kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
@@ -610,7 +600,7 @@ void c4iw_register_device(struct work_struct *work)
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.get_netdev = get_netdev;
- dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm) {
ret = -ENOMEM;
goto err_dealloc_ctx;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 4cc7d3a6a6de..1ad3c657e715 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(db_coalescing_threshold,
static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
@@ -632,7 +632,10 @@ static void build_rdma_write_cmpl(struct t4_sq *sq,
wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
+ if (wr->next->opcode == IB_WR_SEND)
+ wcwr->stag_inv = 0;
+ else
+ wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
wcwr->r2 = 0;
wcwr->r3 = 0;
@@ -726,7 +729,10 @@ static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
/* SEND_WITH_INV swsqe */
swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
- swsqe->opcode = FW_RI_SEND_WITH_INV;
+ if (wr->next->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
swsqe->idx = qhp->wq.sq.pidx;
swsqe->complete = 0;
swsqe->signaled = send_signaled;
@@ -897,8 +903,6 @@ static void free_qp_work(struct work_struct *work)
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
- if (ucontext)
- c4iw_put_ucontext(ucontext);
c4iw_put_wr_wait(qhp->wr_waitp);
kfree(qhp);
}
@@ -1133,9 +1137,9 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/*
* Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
* the response for small NVMe-oF READ requests. If the chain is
- * exactly a WRITE->SEND_WITH_INV and the sgl depths and lengths
- * meet the requirements of the fw_ri_write_cmpl_wr work request,
- * then build and post the write_cmpl WR. If any of the tests
+ * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
+ * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
+ * request, then build and post the write_cmpl WR. If any of the tests
* below are not true, then we continue on with the traditional WRITE
* and SEND WRs.
*/
@@ -1145,7 +1149,8 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
wr && wr->next && !wr->next->next &&
wr->opcode == IB_WR_RDMA_WRITE &&
wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
- wr->next->opcode == IB_WR_SEND_WITH_INV &&
+ (wr->next->opcode == IB_WR_SEND ||
+ wr->next->opcode == IB_WR_SEND_WITH_INV) &&
wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
wr->next->num_sge == 1 && num_wrs >= 2) {
post_write_cmpl(qhp, wr);
@@ -1970,10 +1975,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ disconnect = 1;
if (!internal) {
- c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- disconnect = 1;
} else {
terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
@@ -2331,7 +2336,6 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
insert_mmap(ucontext, ma_sync_key_mm);
}
- c4iw_get_ucontext(ucontext);
qhp->ucontext = ucontext;
}
if (!attrs->srq) {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index fff6d48d262f..b170817b2741 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -35,6 +35,7 @@
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
+#include "t4_tcb.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_PD 65536
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 55e9d2f62e5c..d6f9d70af87d 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1072,6 +1072,8 @@ static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
+static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
+ int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
@@ -4102,6 +4104,9 @@ def_access_ibp_counter(seq_naks);
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
+[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
@@ -10771,13 +10776,15 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
break;
ppd->port_error_action = 0;
- ppd->host_link_state = HLS_DN_POLL;
if (quick_linkup) {
/* quick linkup does not go into polling */
ret = do_quick_linkup(dd);
} else {
ret1 = set_physical_link_state(dd, PLS_POLLING);
+ if (!ret1)
+ ret1 = wait_phys_link_out_of_offline(ppd,
+ 3000);
if (ret1 != HCMD_SUCCESS) {
dd_dev_err(dd,
"Failed to transition to Polling link state, return 0x%x\n",
@@ -10785,6 +10792,14 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ret = -EINVAL;
}
}
+
+ /*
+ * Change the host link state after requesting DC8051 to
+ * change its physical state so that we can ignore any
+ * interrupt with stale LNI(XX) error, which will not be
+ * cleared until DC8051 transitions to Polling state.
+ */
+ ppd->host_link_state = HLS_DN_POLL;
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
/*
@@ -12931,6 +12946,39 @@ static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
return read_state;
}
+/*
+ * wait_phys_link_out_of_offline - wait for any out of offline state
+ * @ppd: port device
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for any out of offline physical link
+ * state change to occur.
+ * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
+ */
+static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if ((read_state & 0xF0) != PLS_OFFLINE)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, read_state);
+ return read_state;
+}
+
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
@@ -13177,7 +13225,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
- int qos_rmt_count;
+ int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
@@ -13239,10 +13287,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = rcv_contexts - total_contexts;
}
- /* each user context requires an entry in the RMT */
- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ /*
+ * The RMT entries are currently allocated as shown below:
+ * 1. QOS (0 to 128 entries);
+ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+ * 3. VNIC (num_vnic_contexts).
+ * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+ * entries of the RMT because both VNIC and PSM could allocate any receive
+ * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+ * and PSM FECN must reserve an RMT entry for each possible PSM receive
+ * context.
+ */
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
@@ -14230,9 +14288,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
u64 reg;
int i, idx, regoff, regidx;
u8 offset;
+ u32 total_cnt;
/* there needs to be enough room in the map table */
- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
return;
}
@@ -14286,7 +14346,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
/* add rule 1 */
add_rsm_rule(dd, RSM_INS_FECN, &rrd);
- rmt->used += dd->num_user_contexts;
+ rmt->used += total_cnt;
}
/* Initialize RSM for VNIC */
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 6b9c8f12dff8..87a149410d45 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -857,6 +857,9 @@ static inline int idx_from_vl(int vl)
/* Per device counter indexes */
enum {
C_RCV_OVF = 0,
+ C_RX_LEN_ERR,
+ C_RX_ICRC_ERR,
+ C_RX_EBP,
C_RX_TID_FULL,
C_RX_TID_INVALID,
C_RX_TID_FLGMS,
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index c6163a347e93..ab3589d17aee 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -380,6 +380,9 @@
#define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
+#define RCV_LENGTH_ERR_CNT 0
+#define RCV_ICRC_ERR_CNT 6
+#define RCV_EBP_CNT 9
#define RCV_BUF_OVFL_CNT 10
#define RCV_CONTEXT_EGR_STALL 22
#define RCV_DATA_PKT_CNT 0
@@ -935,6 +938,10 @@
#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
+#define SEND_CTXT_CREDIT_STATUS (TXE + 0x000000100018)
+#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK 0x7FFull
+#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT 32
+#define SEND_CTXT_CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK 0x7FFull
#define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
#define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
#define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 9f992ae36c89..5358ce92ad3e 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -407,6 +407,54 @@ DEBUGFS_SEQ_FILE_OPS(rcds);
DEBUGFS_SEQ_FILE_OPEN(rcds)
DEBUGFS_FILE_OPS(rcds);
+static void *_pios_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
+ return NULL;
+ return pos;
+}
+
+static void *_pios_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
+ return NULL;
+ return pos;
+}
+
+static void _pios_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _pios_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct send_context_info *sci;
+ loff_t *spos = v;
+ loff_t i = *spos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sc_lock, flags);
+ sci = &dd->send_contexts[i];
+ if (sci && sci->type != SC_USER && sci->allocated && sci->sc)
+ seqfile_dump_sci(s, i, sci);
+ spin_unlock_irqrestore(&dd->sc_lock, flags);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(pios);
+DEBUGFS_SEQ_FILE_OPEN(pios)
+DEBUGFS_FILE_OPS(pios);
+
/* read the per-device counters */
static ssize_t dev_counters_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -1032,6 +1080,77 @@ static int qsfp2_debugfs_release(struct inode *in, struct file *fp)
return __qsfp_debugfs_release(in, fp, 1);
}
+#define EXPROM_WRITE_ENABLE BIT_ULL(14)
+
+static bool exprom_wp_disabled;
+
+static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
+{
+ u64 gpio_val = 0;
+
+ if (disable) {
+ gpio_val = EXPROM_WRITE_ENABLE;
+ exprom_wp_disabled = true;
+ dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
+ } else {
+ exprom_wp_disabled = false;
+ dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
+ }
+
+ write_csr(dd, ASIC_GPIO_OUT, gpio_val);
+ write_csr(dd, ASIC_GPIO_OE, gpio_val);
+
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t exprom_wp_debugfs_write(struct file *file,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd = private2ppd(file);
+ char cdata;
+
+ if (count != 1)
+ return -EINVAL;
+ if (get_user(cdata, buf))
+ return -EFAULT;
+ if (cdata == '0')
+ exprom_wp_set(ppd->dd, false);
+ else if (cdata == '1')
+ exprom_wp_set(ppd->dd, true);
+ else
+ return -EINVAL;
+
+ return 1;
+}
+
+static unsigned long exprom_in_use;
+
+static int exprom_wp_debugfs_open(struct inode *in, struct file *fp)
+{
+ if (test_and_set_bit(0, &exprom_in_use))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int exprom_wp_debugfs_release(struct inode *in, struct file *fp)
+{
+ struct hfi1_pportdata *ppd = private2ppd(fp);
+
+ if (exprom_wp_disabled)
+ exprom_wp_set(ppd->dd, false);
+ clear_bit(0, &exprom_in_use);
+
+ return 0;
+}
+
#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
{ \
.name = nm, \
@@ -1071,6 +1190,9 @@ static const struct counter_info port_cntr_ops[] = {
qsfp1_debugfs_open, qsfp1_debugfs_release),
DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
qsfp2_debugfs_open, qsfp2_debugfs_release),
+ DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read,
+ exprom_wp_debugfs_write, exprom_wp_debugfs_open,
+ exprom_wp_debugfs_release),
DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
@@ -1119,6 +1241,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
char link[10];
struct hfi1_devdata *dd = dd_from_dev(ibd);
struct hfi1_pportdata *ppd;
+ struct dentry *root;
int unit = dd->unit;
int i, j;
@@ -1126,30 +1249,29 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
return;
snprintf(name, sizeof(name), "%s_%d", class_name(), unit);
snprintf(link, sizeof(link), "%d", unit);
- ibd->hfi1_ibdev_dbg = debugfs_create_dir(name, hfi1_dbg_root);
- if (!ibd->hfi1_ibdev_dbg) {
- pr_warn("create of %s failed\n", name);
- return;
- }
+ root = debugfs_create_dir(name, hfi1_dbg_root);
+ ibd->hfi1_ibdev_dbg = root;
+
ibd->hfi1_ibdev_link =
debugfs_create_symlink(link, hfi1_dbg_root, name);
- if (!ibd->hfi1_ibdev_link) {
- pr_warn("create of %s symlink failed\n", name);
- return;
- }
- DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(tx_opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(rcds, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
+
+ debugfs_create_file("opcode_stats", 0444, root, ibd,
+ &_opcode_stats_file_ops);
+ debugfs_create_file("tx_opcode_stats", 0444, root, ibd,
+ &_tx_opcode_stats_file_ops);
+ debugfs_create_file("ctx_stats", 0444, root, ibd, &_ctx_stats_file_ops);
+ debugfs_create_file("qp_stats", 0444, root, ibd, &_qp_stats_file_ops);
+ debugfs_create_file("sdes", 0444, root, ibd, &_sdes_file_ops);
+ debugfs_create_file("rcds", 0444, root, ibd, &_rcds_file_ops);
+ debugfs_create_file("pios", 0444, root, ibd, &_pios_file_ops);
+ debugfs_create_file("sdma_cpu_list", 0444, root, ibd,
+ &_sdma_cpu_list_file_ops);
+
/* dev counter files */
for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
- DEBUGFS_FILE_CREATE(cntr_ops[i].name,
- ibd->hfi1_ibdev_dbg,
- dd,
- &cntr_ops[i].ops, S_IRUGO);
+ debugfs_create_file(cntr_ops[i].name, 0444, root, dd,
+ &cntr_ops[i].ops);
+
/* per port files */
for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) {
@@ -1157,12 +1279,11 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
sizeof(name),
port_cntr_ops[i].name,
j + 1);
- DEBUGFS_FILE_CREATE(name,
- ibd->hfi1_ibdev_dbg,
- ppd,
- &port_cntr_ops[i].ops,
+ debugfs_create_file(name,
!port_cntr_ops[i].ops.write ?
- S_IRUGO : S_IRUGO | S_IWUSR);
+ S_IRUGO :
+ S_IRUGO | S_IWUSR,
+ root, ppd, &port_cntr_ops[i].ops);
}
hfi1_fault_init_debugfs(ibd);
@@ -1292,10 +1413,10 @@ DEBUGFS_FILE_OPS(driver_stats);
void hfi1_dbg_init(void)
{
hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL);
- if (!hfi1_dbg_root)
- pr_warn("init of debugfs failed\n");
- DEBUGFS_SEQ_FILE_CREATE(driver_stats_names, hfi1_dbg_root, NULL);
- DEBUGFS_SEQ_FILE_CREATE(driver_stats, hfi1_dbg_root, NULL);
+ debugfs_create_file("driver_stats_names", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_names_file_ops);
+ debugfs_create_file("driver_stats", 0444, hfi1_dbg_root, NULL,
+ &_driver_stats_file_ops);
}
void hfi1_dbg_exit(void)
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index d5d824459fcc..57e582caa5eb 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -49,16 +49,6 @@
struct hfi1_ibdev;
-#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
-do { \
- struct dentry *ent; \
- const char *__name = name; \
- ent = debugfs_create_file(__name, mode, parent, \
- data, ops); \
- if (!ent) \
- pr_warn("create of %s failed\n", __name); \
-} while (0)
-
#define DEBUGFS_SEQ_FILE_OPS(name) \
static const struct seq_operations _##name##_seq_ops = { \
.start = _##name##_seq_start, \
@@ -89,8 +79,6 @@ static const struct file_operations _##name##_file_ops = { \
.release = seq_release \
}
-#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
- DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, 0444)
ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
loff_t *ppos);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 98a12bf06365..b30eb14a39b4 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -430,40 +430,60 @@ static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};
-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
- bool do_cnp)
+/**
+ * hfi1_process_ecn_slowpath - Process FECN or BECN bits
+ * @qp: The packet's destination QP
+ * @pkt: The packet itself.
+ * @prescan: Is the caller the RXQ prescan
+ *
+ * Process the packet's FECN or BECN bits. By now, the packet
+ * has already been evaluated to decide whether those bits should
+ * be processed.
+ * The significance of the @prescan argument is that if the caller
+ * is the RXQ prescan, a CNP will be sent out instead of waiting for the
+ * normal packet processing to send an ACK with BECN set (or a CNP).
+ */
+bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool prescan)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_other_headers *ohdr = pkt->ohdr;
struct ib_grh *grh = pkt->grh;
- u32 rqpn = 0, bth1;
+ u32 rqpn = 0;
u16 pkey;
u32 rlid, slid, dlid = 0;
- u8 hdr_type, sc, svc_type;
- bool is_mcast = false;
+ u8 hdr_type, sc, svc_type, opcode;
+ bool is_mcast = false, ignore_fecn = false, do_cnp = false,
+ fecn, becn;
/* can be called from prescan */
if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
- is_mcast = hfi1_is_16B_mcast(dlid);
pkey = hfi1_16B_get_pkey(pkt->hdr);
sc = hfi1_16B_get_sc(pkt->hdr);
dlid = hfi1_16B_get_dlid(pkt->hdr);
slid = hfi1_16B_get_slid(pkt->hdr);
+ is_mcast = hfi1_is_16B_mcast(dlid);
+ opcode = ib_bth_get_opcode(ohdr);
hdr_type = HFI1_PKT_TYPE_16B;
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+ becn = hfi1_16B_get_becn(pkt->hdr);
} else {
- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
pkey = ib_bth_get_pkey(ohdr);
sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
- dlid = ib_get_dlid(pkt->hdr);
+ dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
+ ppd->lid;
slid = ib_get_slid(pkt->hdr);
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ opcode = ib_bth_get_opcode(ohdr);
hdr_type = HFI1_PKT_TYPE_9B;
+ fecn = ib_bth_get_fecn(ohdr);
+ becn = ib_bth_get_becn(ohdr);
}
switch (qp->ibqp.qp_type) {
case IB_QPT_UD:
- dlid = ppd->lid;
rlid = slid;
rqpn = ib_get_sqpn(pkt->ohdr);
svc_type = IB_CC_SVCTYPE_UD;
@@ -485,22 +505,31 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
svc_type = IB_CC_SVCTYPE_RC;
break;
default:
- return;
+ return false;
}
- bth1 = be32_to_cpu(ohdr->bth[1]);
+ ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
+ (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
+ /*
+ * ACKNOWLEDGE packets do not get a CNP but this will be
+ * guarded by ignore_fecn above.
+ */
+ do_cnp = prescan ||
+ (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
+ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE);
+
/* Call appropriate CNP handler */
- if (do_cnp && (bth1 & IB_FECN_SMASK))
+ if (!ignore_fecn && do_cnp && fecn)
hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
dlid, rlid, sc, grh);
- if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
- u32 lqpn = bth1 & RVT_QPN_MASK;
+ if (becn) {
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
u8 sl = ibp->sc_to_sl[sc];
process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
-
+ return !ignore_fecn && fecn;
}
struct ps_mdata {
@@ -599,7 +628,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
- int is_ecn = 0;
u8 lnh;
if (ps_done(&mdata, rhf, rcd))
@@ -625,12 +653,10 @@ static void __prescan_rxq(struct hfi1_packet *packet)
goto next; /* just in case */
}
- bth1 = be32_to_cpu(packet->ohdr->bth[1]);
- is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));
-
- if (!is_ecn)
+ if (!hfi1_may_ecn(packet))
goto next;
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
qpn = bth1 & RVT_QPN_MASK;
rcu_read_lock();
qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
@@ -640,7 +666,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
goto next;
}
- process_ecn(qp, packet, true);
+ hfi1_process_ecn_slowpath(qp, packet, true);
rcu_read_unlock();
/* turn off BECN, FECN */
@@ -1401,7 +1427,7 @@ static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
(packet->dlid !=
opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
- if (packet->dlid != ppd->lid)
+ if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
index 1be49a0d9c11..e9d5cc8b771a 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -112,9 +112,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
*/
void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
{
- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
-
kfree(rcd->groups);
rcd->groups = NULL;
hfi1_exp_tid_group_init(rcd);
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index e2290f32c8d9..3fd3315d0fb0 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -250,6 +250,7 @@ void hfi1_fault_exit_debugfs(struct hfi1_ibdev *ibd)
int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
{
struct dentry *parent = ibd->hfi1_ibdev_dbg;
+ struct dentry *fault_dir;
ibd->fault = kzalloc(sizeof(*ibd->fault), GFP_KERNEL);
if (!ibd->fault)
@@ -269,45 +270,31 @@ int hfi1_fault_init_debugfs(struct hfi1_ibdev *ibd)
bitmap_zero(ibd->fault->opcodes,
sizeof(ibd->fault->opcodes) * BITS_PER_BYTE);
- ibd->fault->dir =
- fault_create_debugfs_attr("fault", parent,
- &ibd->fault->attr);
- if (IS_ERR(ibd->fault->dir)) {
+ fault_dir =
+ fault_create_debugfs_attr("fault", parent, &ibd->fault->attr);
+ if (IS_ERR(fault_dir)) {
kfree(ibd->fault);
ibd->fault = NULL;
return -ENOENT;
}
-
- DEBUGFS_SEQ_FILE_CREATE(fault_stats, ibd->fault->dir, ibd);
- if (!debugfs_create_bool("enable", 0600, ibd->fault->dir,
- &ibd->fault->enable))
- goto fail;
- if (!debugfs_create_bool("suppress_err", 0600,
- ibd->fault->dir,
- &ibd->fault->suppress_err))
- goto fail;
- if (!debugfs_create_bool("opcode_mode", 0600, ibd->fault->dir,
- &ibd->fault->opcode))
- goto fail;
- if (!debugfs_create_file("opcodes", 0600, ibd->fault->dir,
- ibd->fault, &__fault_opcodes_fops))
- goto fail;
- if (!debugfs_create_u64("skip_pkts", 0600,
- ibd->fault->dir,
- &ibd->fault->fault_skip))
- goto fail;
- if (!debugfs_create_u64("skip_usec", 0600,
- ibd->fault->dir,
- &ibd->fault->fault_skip_usec))
- goto fail;
- if (!debugfs_create_u8("direction", 0600, ibd->fault->dir,
- &ibd->fault->direction))
- goto fail;
+ ibd->fault->dir = fault_dir;
+
+ debugfs_create_file("fault_stats", 0444, fault_dir, ibd,
+ &_fault_stats_file_ops);
+ debugfs_create_bool("enable", 0600, fault_dir, &ibd->fault->enable);
+ debugfs_create_bool("suppress_err", 0600, fault_dir,
+ &ibd->fault->suppress_err);
+ debugfs_create_bool("opcode_mode", 0600, fault_dir,
+ &ibd->fault->opcode);
+ debugfs_create_file("opcodes", 0600, fault_dir, ibd->fault,
+ &__fault_opcodes_fops);
+ debugfs_create_u64("skip_pkts", 0600, fault_dir,
+ &ibd->fault->fault_skip);
+ debugfs_create_u64("skip_usec", 0600, fault_dir,
+ &ibd->fault->fault_skip_usec);
+ debugfs_create_u8("direction", 0600, fault_dir, &ibd->fault->direction);
return 0;
-fail:
- hfi1_fault_exit_debugfs(ibd);
- return -ENOMEM;
}
bool hfi1_dbg_fault_suppress_err(struct hfi1_ibdev *ibd)
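The fault.c hunk above replaces error-checked debugfs setup with plain debugfs_create_*() calls and removes the failure path: a missing debugfs entry is not treated as fatal, so the return values can simply be ignored. A minimal sketch of that idiom as a self-contained module, with made-up names:

#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static bool demo_enable;
static u64 demo_skip;

static int __init demo_init(void)
{
	/* Return values are deliberately not checked: a debugfs failure is
	 * not a reason to fail driver initialization.
	 */
	demo_dir = debugfs_create_dir("demo_fault", NULL);
	debugfs_create_bool("enable", 0600, demo_dir, &demo_enable);
	debugfs_create_u64("skip_pkts", 0600, demo_dir, &demo_skip);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");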
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 4dbb66d0d057..0a41201f7983 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vmf = 1;
break;
case STATUS:
- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+ if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b562ca35dc2b..15ec3e1feb09 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1804,13 +1804,20 @@ static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
return &rcd->ppd->ibport_data;
}
-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
- bool do_cnp);
-static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
- bool do_cnp)
+/**
+ * hfi1_may_ecn - Check whether FECN or BECN processing should be done
+ * @pkt: the packet to be evaluated
+ *
+ * Check whether the FECN or BECN bits in the packet's header are
+ * enabled, depending on packet type.
+ *
+ * This function only checks for FECN and BECN bits. Additional checks
+ * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
+ * ensure correct handling.
+ */
+static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
{
- bool becn;
- bool fecn;
+ bool fecn, becn;
if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
fecn = hfi1_16B_get_fecn(pkt->hdr);
@@ -1819,10 +1826,18 @@ static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
fecn = ib_bth_get_fecn(pkt->ohdr);
becn = ib_bth_get_becn(pkt->ohdr);
}
- if (unlikely(fecn || becn)) {
- hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
- return fecn;
- }
+ return fecn || becn;
+}
+
+bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool prescan);
+static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
+{
+ bool do_work;
+
+ do_work = hfi1_may_ecn(pkt);
+ if (unlikely(do_work))
+ return hfi1_process_ecn_slowpath(qp, pkt, false);
return false;
}
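The hfi.h hunk above splits ECN handling into a cheap inline predicate (hfi1_may_ecn()) and an out-of-line slowpath that is entered only when a FECN or BECN bit is actually set; process_ecn() now reports whether a FECN still needs to be acknowledged. A minimal userspace sketch of the same fast-path/slow-path split, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct pkt { bool fecn; bool becn; };

/* Out-of-line slowpath: only reached when a congestion bit is set. */
static bool handle_ecn_slowpath(const struct pkt *p)
{
	if (p->becn)
		printf("process BECN\n");
	return p->fecn;		/* caller still owes an ACK for the FECN */
}

/* Inline fastpath: a single cheap test in the common case. */
static inline bool may_ecn(const struct pkt *p)
{
	return p->fecn || p->becn;
}

static inline bool process_ecn(const struct pkt *p)
{
	return may_ecn(p) ? handle_ecn_slowpath(p) : false;
}

int main(void)
{
	struct pkt p = { .fecn = true };
	printf("fecn pending: %d\n", process_ecn(&p));
	return 0;
}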
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 340a2cefbae4..b1e509d51e2e 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -466,7 +466,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
if (rcd->egrbufs.size < hfi1_max_mtu) {
rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
hfi1_cdbg(PROC,
- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
+ "ctxt%u: eager bufs size too small. Adjusting to %u\n",
rcd->ctxt, rcd->egrbufs.size);
}
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
@@ -797,7 +797,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
ppd->hfi1_wq =
alloc_workqueue(
"hfi%d_%d",
- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM,
HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
dd->unit, pidx);
if (!ppd->hfi1_wq)
@@ -2052,7 +2053,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.size = alloced_bytes;
hfi1_cdbg(PROC,
- "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
+ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
rcd->ctxt, rcd->egrbufs.alloced,
rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
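The init.c hunk adds WQ_MEM_RECLAIM to the per-port send workqueue so it keeps a rescuer thread and can still make forward progress under memory pressure. A minimal sketch of allocating such a workqueue, using a made-up name:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_create_wq(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, which is required for
	 * any workqueue that may sit in a memory-reclaim or I/O path.
	 */
	demo_wq = alloc_workqueue("demo_wq",
				  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
				  2);
	return demo_wq ? 0 : -ENOMEM;
}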
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 9ab50d2308dc..dd5a5c030066 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -742,6 +742,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
spin_lock_init(&sc->alloc_lock);
spin_lock_init(&sc->release_lock);
spin_lock_init(&sc->credit_ctrl_lock);
+ seqlock_init(&sc->waitlock);
INIT_LIST_HEAD(&sc->piowait);
INIT_WORK(&sc->halt_work, sc_halted);
init_waitqueue_head(&sc->halt_wait);
@@ -1593,7 +1594,6 @@ void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
static void sc_piobufavail(struct send_context *sc)
{
struct hfi1_devdata *dd = sc->dd;
- struct hfi1_ibdev *dev = &dd->verbs_dev;
struct list_head *list;
struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
struct rvt_qp *qp;
@@ -1612,7 +1612,7 @@ static void sc_piobufavail(struct send_context *sc)
* could end up with QPs on the wait list with the interrupt
* disabled.
*/
- write_seqlock_irqsave(&dev->iowait_lock, flags);
+ write_seqlock_irqsave(&sc->waitlock, flags);
while (!list_empty(list)) {
struct iowait *wait;
@@ -1636,7 +1636,7 @@ static void sc_piobufavail(struct send_context *sc)
if (!list_empty(list))
hfi1_sc_wantpiobuf_intr(sc, 1);
}
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ write_sequnlock_irqrestore(&sc->waitlock, flags);
/* Wake up the most starved one first */
if (n)
@@ -2137,3 +2137,28 @@ void free_credit_return(struct hfi1_devdata *dd)
kfree(dd->cr_base);
dd->cr_base = NULL;
}
+
+void seqfile_dump_sci(struct seq_file *s, u32 i,
+ struct send_context_info *sci)
+{
+ struct send_context *sc = sci->sc;
+ u64 reg;
+
+ seq_printf(s, "SCI %u: type %u base %u credits %u\n",
+ i, sci->type, sci->base, sci->credits);
+ seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
+ sc->flags, sc->sw_index, sc->hw_context, sc->group);
+ seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
+ sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
+ seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
+ sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
+ seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
+ sc->credit_intr_count, sc->credit_ctrl);
+ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
+ seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
+ (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
+ CR_COUNTER_SHIFT,
+ (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
+ SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
+ reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
+}
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index aaf372c3e5d6..c9a58b642bdd 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -127,6 +127,8 @@ struct send_context {
volatile __le64 *hw_free; /* HW free counter */
/* list for PIO waiters */
struct list_head piowait ____cacheline_aligned_in_smp;
+ seqlock_t waitlock;
+
spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
u32 credit_intr_count; /* count of credit intr users */
u64 credit_ctrl; /* cache for credit control */
@@ -329,4 +331,7 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
void seg_pio_copy_end(struct pio_buf *pbuf);
+void seqfile_dump_sci(struct seq_file *s, u32 i,
+ struct send_context_info *sci);
+
#endif /* _PIO_H */
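The pio.h and pio.c hunks move PIO waiters from the device-wide iowait_lock onto a per-send-context seqlock (sc->waitlock); the qp.c, sdma and user_sdma hunks below do the same per SDMA engine, removing a global point of contention. A minimal sketch of the seqlock read/write pattern used here, with a hypothetical structure:

#include <linux/list.h>
#include <linux/seqlock.h>

struct demo_engine {
	seqlock_t waitlock;
	struct list_head dmawait;
};

static void demo_engine_init(struct demo_engine *e)
{
	seqlock_init(&e->waitlock);
	INIT_LIST_HEAD(&e->dmawait);
}

/* Readers retry the lockless check; writers take the seqlock only when the
 * wait list must actually be modified.
 */
static bool demo_have_waiters(struct demo_engine *e)
{
	unsigned int seq;
	bool busy;

	do {
		seq = read_seqbegin(&e->waitlock);
		busy = !list_empty(&e->dmawait);
	} while (read_seqretry(&e->waitlock, seq));

	return busy;
}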
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 1a016248039f..5866f358ea04 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -375,20 +375,18 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
static void qp_pio_drain(struct rvt_qp *qp)
{
- struct hfi1_ibdev *dev;
struct hfi1_qp_priv *priv = qp->priv;
if (!priv->s_sendcontext)
return;
- dev = to_idev(qp->ibqp.device);
while (iowait_pio_pending(&priv->s_iowait)) {
- write_seqlock_irq(&dev->iowait_lock);
+ write_seqlock_irq(&priv->s_sendcontext->waitlock);
hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
- write_sequnlock_irq(&dev->iowait_lock);
+ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
iowait_pio_drain(&priv->s_iowait);
- write_seqlock_irq(&dev->iowait_lock);
+ write_seqlock_irq(&priv->s_sendcontext->waitlock);
hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
- write_sequnlock_irq(&dev->iowait_lock);
+ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
}
}
@@ -459,7 +457,6 @@ static int iowait_sleep(
struct hfi1_qp_priv *priv;
unsigned long flags;
int ret = 0;
- struct hfi1_ibdev *dev;
qp = tx->qp;
priv = qp->priv;
@@ -472,9 +469,8 @@ static int iowait_sleep(
* buffer and undoing the side effects of the copy.
*/
/* Make a common routine? */
- dev = &sde->dd->verbs_dev;
list_add_tail(&stx->list, &wait->tx_head);
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&sde->waitlock);
if (sdma_progress(sde, seq, stx))
goto eagain;
if (list_empty(&priv->s_iowait.list)) {
@@ -485,11 +481,11 @@ static int iowait_sleep(
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
iowait_queue(pkts_sent, &priv->s_iowait,
&sde->dmawait);
- priv->s_iowait.lock = &dev->iowait_lock;
+ priv->s_iowait.lock = &sde->waitlock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
rvt_get_qp(qp);
}
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sde->waitlock);
hfi1_qp_unbusy(qp, wait);
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
@@ -499,7 +495,7 @@ static int iowait_sleep(
}
return ret;
eagain:
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sde->waitlock);
spin_unlock_irqrestore(&qp->s_lock, flags);
list_del_init(&stx->list);
return -EAGAIN;
@@ -837,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp)
write_seqlock(lock);
if (!list_empty(&priv->s_iowait.list) &&
!(qp->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
rvt_put_qp(qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 188aa4f686a0..cfde43b1df96 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
if (++s_last >= qp->s_size)
@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
u32 s_last;
rvt_put_swqe(wqe);
+ rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
if (++s_last >= qp->s_size)
@@ -2049,8 +2051,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
struct ib_reth *reth;
unsigned long flags;
int ret;
- bool is_fecn = false;
- bool copy_last = false;
+ bool copy_last = false, fecn;
u32 rkey;
u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
@@ -2059,7 +2060,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
if (hfi1_ruc_check_hdr(ibp, packet))
return;
- is_fecn = process_ecn(qp, packet, false);
+ fecn = process_ecn(qp, packet);
/*
* Process responses (ACKs) before anything else. Note that the
@@ -2070,8 +2071,6 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
rc_rcv_resp(packet);
- if (is_fecn)
- goto send_ack;
return;
}
@@ -2303,7 +2302,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -2347,11 +2346,11 @@ send_last:
/* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
return;
}
@@ -2377,7 +2376,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -2413,11 +2412,11 @@ send_last:
/* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING;
+ if (fecn)
+ qp->s_flags |= RVT_S_ECN;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
return;
}
@@ -2430,16 +2429,9 @@ send_last:
qp->r_ack_psn = psn;
qp->r_nak_state = 0;
/* Send an ACK if requested or required. */
- if (psn & IB_BTH_REQ_ACK) {
- if (packet->numpkt == 0) {
- rc_cancel_ack(qp);
- goto send_ack;
- }
- if (qp->r_adefered >= HFI1_PSN_CREDIT) {
- rc_cancel_ack(qp);
- goto send_ack;
- }
- if (unlikely(is_fecn)) {
+ if (psn & IB_BTH_REQ_ACK || fecn) {
+ if (packet->numpkt == 0 || fecn ||
+ qp->r_adefered >= HFI1_PSN_CREDIT) {
rc_cancel_ack(qp);
goto send_ack;
}
@@ -2480,7 +2472,7 @@ nack_acc:
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
send_ack:
- hfi1_send_rc_ack(packet, is_fecn);
+ hfi1_send_rc_ack(packet, fecn);
}
void hfi1_rc_hdrerr(
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 7fb317c711df..431b0e71a70b 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -506,7 +506,7 @@ void _hfi1_do_send(struct work_struct *work)
/**
* hfi1_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: a pointer to the QP
* @in_thread: true if in a workqueue thread
*
* Process entries in the send work queue until credit or queue is
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0a871614fde8..6665dd550ae8 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1424,6 +1424,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
seqlock_init(&sde->head_lock);
spin_lock_init(&sde->senddmactrl_lock);
spin_lock_init(&sde->flushlist_lock);
+ seqlock_init(&sde->waitlock);
/* insure there is always a zero bit */
sde->ahg_bits = 0xfffffffe00000000ULL;
@@ -1757,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
struct iowait *wait, *nw;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
uint i, n = 0, seq, max_idx = 0;
- struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
u8 max_starved_cnt = 0;
#ifdef CONFIG_SDMA_VERBOSITY
@@ -1767,10 +1767,10 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
#endif
do {
- seq = read_seqbegin(&dev->iowait_lock);
+ seq = read_seqbegin(&sde->waitlock);
if (!list_empty(&sde->dmawait)) {
/* at least one item */
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&sde->waitlock);
/* Harvest waiters wanting DMA descriptors */
list_for_each_entry_safe(
wait,
@@ -1793,10 +1793,10 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
list_del_init(&wait->list);
waits[n++] = wait;
}
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sde->waitlock);
break;
}
- } while (read_seqretry(&dev->iowait_lock, seq));
+ } while (read_seqretry(&sde->waitlock, seq));
/* Schedule the most starved one first */
if (n)
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 6dc63d7c5685..1e2e40f79cb2 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -382,6 +382,7 @@ struct sdma_engine {
u64 progress_int_cnt;
/* private: */
+ seqlock_t waitlock;
struct list_head dmawait;
/* CONFIG SDMA for now, just blindly duplicate */
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
index e62171fb7379..de7a87392b8d 100644
--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -86,14 +86,14 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
* actual function to work and can not be in a macro.
*/
#define __hfi1_trace_def(lvl) \
-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
\
DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
TP_PROTO(const char *function, struct va_format *vaf), \
TP_ARGS(function, vaf))
#define __hfi1_trace_fn(lvl) \
-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
+void __printf(2, 3) __hfi1_trace_##lvl(const char *func, char *fmt, ...)\
{ \
struct va_format vaf = { \
.fmt = fmt, \
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 6aca0c5a7f97..6ba47037c424 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -321,7 +321,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
if (hfi1_ruc_check_hdr(ibp, packet))
return;
- process_ecn(qp, packet, true);
+ process_ecn(qp, packet);
psn = ib_bth_get_psn(ohdr);
/* Compare the PSN verses the expected PSN. */
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 4baa8f4d49de..88242fe95eaa 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -51,6 +51,7 @@
#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
+#include "trace_ibhdrs.h"
#include "qp.h"
/* We support only two types - 9B and 16B for now */
@@ -656,18 +657,19 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
u32 bth0, plen, vl, hwords = 7;
u16 len;
u8 l4;
- struct hfi1_16b_header hdr;
+ struct hfi1_opa_header hdr;
struct ib_other_headers *ohdr;
struct pio_buf *pbuf;
struct send_context *ctxt = qp_to_send_context(qp, sc5);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 nwords;
+ hdr.hdr_type = HFI1_PKT_TYPE_16B;
/* Populate length */
nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
if (old_grh) {
- struct ib_grh *grh = &hdr.u.l.grh;
+ struct ib_grh *grh = &hdr.opah.u.l.grh;
grh->version_tclass_flow = old_grh->version_tclass_flow;
grh->paylen = cpu_to_be16(
@@ -675,11 +677,11 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
grh->hop_limit = 0xff;
grh->sgid = old_grh->dgid;
grh->dgid = old_grh->sgid;
- ohdr = &hdr.u.l.oth;
+ ohdr = &hdr.opah.u.l.oth;
l4 = OPA_16B_L4_IB_GLOBAL;
hwords += sizeof(struct ib_grh) / sizeof(u32);
} else {
- ohdr = &hdr.u.oth;
+ ohdr = &hdr.opah.u.oth;
l4 = OPA_16B_L4_IB_LOCAL;
}
@@ -693,7 +695,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
/* Convert dwords to flits */
len = (hwords + nwords) >> 1;
- hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
+ hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
plen = 2 /* PBC */ + hwords + nwords;
pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
@@ -701,9 +703,11 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
if (ctxt) {
pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
- if (pbuf)
+ if (pbuf) {
+ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
&hdr, hwords);
+ }
}
}
@@ -715,14 +719,15 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 bth0, plen, vl, hwords = 5;
u16 lrh0;
u8 sl = ibp->sc_to_sl[sc5];
- struct ib_header hdr;
+ struct hfi1_opa_header hdr;
struct ib_other_headers *ohdr;
struct pio_buf *pbuf;
struct send_context *ctxt = qp_to_send_context(qp, sc5);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ hdr.hdr_type = HFI1_PKT_TYPE_9B;
if (old_grh) {
- struct ib_grh *grh = &hdr.u.l.grh;
+ struct ib_grh *grh = &hdr.ibh.u.l.grh;
grh->version_tclass_flow = old_grh->version_tclass_flow;
grh->paylen = cpu_to_be16(
@@ -730,11 +735,11 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
grh->hop_limit = 0xff;
grh->sgid = old_grh->dgid;
grh->dgid = old_grh->sgid;
- ohdr = &hdr.u.l.oth;
+ ohdr = &hdr.ibh.u.l.oth;
lrh0 = HFI1_LRH_GRH;
hwords += sizeof(struct ib_grh) / sizeof(u32);
} else {
- ohdr = &hdr.u.oth;
+ ohdr = &hdr.ibh.u.oth;
lrh0 = HFI1_LRH_BTH;
}
@@ -746,16 +751,18 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
ohdr->bth[2] = 0; /* PSN 0 */
- hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
+ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
plen = 2 /* PBC */ + hwords;
pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
vl = sc_to_vlt(ppd->dd, sc5);
pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
if (ctxt) {
pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
- if (pbuf)
+ if (pbuf) {
+ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
&hdr, hwords);
+ }
}
}
@@ -912,7 +919,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
}
- process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
+ process_ecn(qp, packet);
/*
* Get the number of bytes the message was padded by
* and drop incomplete packets.
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 3f0aadccd9f6..e5e7fad09f32 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -130,7 +130,6 @@ static int defer_packet_queue(
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
- struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
@@ -144,10 +143,10 @@ static int defer_packet_queue(
* it is supposed to be enqueued.
*/
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&sde->waitlock);
if (list_empty(&pq->busy.list))
iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sde->waitlock);
return -EBUSY;
eagain:
return -EAGAIN;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index cfc20f9725de..bf8b7ac4a913 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -765,7 +765,6 @@ static int pio_wait(struct rvt_qp *qp,
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_devdata *dd = sc->dd;
- struct hfi1_ibdev *dev = &dd->verbs_dev;
unsigned long flags;
int ret = 0;
@@ -777,7 +776,7 @@ static int pio_wait(struct rvt_qp *qp,
*/
spin_lock_irqsave(&qp->s_lock, flags);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- write_seqlock(&dev->iowait_lock);
+ write_seqlock(&sc->waitlock);
list_add_tail(&ps->s_txreq->txreq.list,
&ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
@@ -790,14 +789,14 @@ static int pio_wait(struct rvt_qp *qp,
was_empty = list_empty(&sc->piowait);
iowait_queue(ps->pkts_sent, &priv->s_iowait,
&sc->piowait);
- priv->s_iowait.lock = &dev->iowait_lock;
+ priv->s_iowait.lock = &sc->waitlock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
rvt_get_qp(qp);
/* counting: only call wantpiobuf_intr if first user */
if (was_empty)
hfi1_sc_wantpiobuf_intr(sc, 1);
}
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sc->waitlock);
hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
@@ -919,6 +918,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (slen > len)
slen = len;
+ if (slen > ss->sge.sge_length)
+ slen = ss->sge.sge_length;
rvt_update_sge(ss, slen, false);
seg_pio_copy_mid(pbuf, addr, slen);
len -= slen;
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index c9876d9e3cb9..a922db58be14 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -816,14 +816,14 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- chip_sdma_engines(dd), dd->num_vnic_contexts);
+ dd->num_sdma, dd->num_vnic_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = chip_sdma_engines(dd);
+ vinfo->num_tx_q = dd->num_sdma;
vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index 97bd940a056a..1f81c480e028 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -57,7 +57,6 @@
#define HFI1_VNIC_TXREQ_NAME_LEN 32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64
-#define HFI1_VNIC_SDMA_RETRY_COUNT 1
/*
* struct vnic_txreq - VNIC transmit descriptor
@@ -67,7 +66,6 @@
* @pad: pad buffer
* @plen: pad length
* @pbc_val: pbc value
- * @retry_count: tx retry count
*/
struct vnic_txreq {
struct sdma_txreq txreq;
@@ -77,8 +75,6 @@ struct vnic_txreq {
unsigned char pad[HFI1_VNIC_MAX_PAD];
u16 plen;
__le64 pbc_val;
-
- u32 retry_count;
};
static void vnic_sdma_complete(struct sdma_txreq *txreq,
@@ -196,7 +192,6 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
ret = build_vnic_tx_desc(sde, tx, pbc);
if (unlikely(ret))
goto free_desc;
- tx->retry_count = 0;
ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
&tx->txreq, vnic_sdma->pkts_sent);
@@ -237,18 +232,17 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
{
struct hfi1_vnic_sdma *vnic_sdma =
container_of(wait->iow, struct hfi1_vnic_sdma, wait);
- struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
- struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
- if (sdma_progress(sde, seq, txreq))
- if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
- return -EAGAIN;
+ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
- write_seqlock(&dev->iowait_lock);
if (list_empty(&vnic_sdma->wait.list))
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
- write_sequnlock(&dev->iowait_lock);
+ write_sequnlock(&sde->waitlock);
return -EBUSY;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 29311da34971..569b8710f571 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4568,7 +4568,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
break;
- };
+ }
eq->event_type = event_type;
eq->sub_type = sub_type;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 2f2b4426ded7..8feec35f95a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -552,7 +552,7 @@ enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
void i40iw_request_reset(struct i40iw_device *iwdev);
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
-void i40iw_setup_cm_core(struct i40iw_device *iwdev);
+int i40iw_setup_cm_core(struct i40iw_device *iwdev);
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
void i40iw_process_aeq(struct i40iw_device *);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index fd2f89f46248..f6ad1934a9cd 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -404,7 +404,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
if (pdata)
pd_len = pdata->size;
- if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+ if (cm_node->vlan_id <= VLAN_VID_MASK)
eth_hlen += 4;
if (cm_node->ipv4)
@@ -433,7 +433,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -463,7 +463,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -3237,7 +3237,7 @@ void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
* core
* @iwdev: iwarp device structure
*/
-void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+int i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
@@ -3257,9 +3257,19 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
cm_core->event_wq = alloc_ordered_workqueue("iwewq",
WQ_MEM_RECLAIM);
+ if (!cm_core->event_wq)
+ goto error;
cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
WQ_MEM_RECLAIM);
+ if (!cm_core->disconn_wq)
+ goto error;
+
+ return 0;
+error:
+ i40iw_cleanup_cm_core(&iwdev->cm_core);
+
+ return -ENOMEM;
}
/**
@@ -3279,8 +3289,10 @@ void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
del_timer_sync(&cm_core->tcp_timer);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
- destroy_workqueue(cm_core->event_wq);
- destroy_workqueue(cm_core->disconn_wq);
+ if (cm_core->event_wq)
+ destroy_workqueue(cm_core->event_wq);
+ if (cm_core->disconn_wq)
+ destroy_workqueue(cm_core->disconn_wq);
}
/**
@@ -3324,7 +3336,7 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->flow_label = 0;
tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
tcp_info->insert_vlan_tag = true;
tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
cm_node->vlan_id);
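The i40iw_cm.c hunk replaces comparisons against the removed VLAN_TAG_PRESENT flag with a check against VLAN_VID_MASK: a node is treated as VLAN-tagged when its stored ID is a valid 12-bit VID. A one-line sketch of the check, with a hypothetical helper name:

#include <linux/if_vlan.h>

/* VLAN_VID_MASK is 0x0fff; anything above it means no VLAN is configured. */
static bool demo_has_vlan(u16 vlan_id)
{
	return vlan_id <= VLAN_VID_MASK;
}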
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 68095f00d08f..10932baee279 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1641,7 +1641,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
iwdev = &hdl->device;
iwdev->hdl = hdl;
dev = &iwdev->sc_dev;
- i40iw_setup_cm_core(iwdev);
+ if (i40iw_setup_cm_core(iwdev)) {
+ kfree(iwdev->hdl);
+ return -ENOMEM;
+ }
dev->back_dev = (void *)iwdev;
iwdev->ldev = &hdl->ldev;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index d7846038caf0..946ccd732d29 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
rcu_read_lock();
in = __in_dev_get_rcu(upper_dev);
- local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+ if (!in->ifa_list)
+ local_ipaddr = 0;
+ else
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
rcu_read_unlock();
} else {
local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
case NETDEV_UP:
/* Fall through */
case NETDEV_CHANGEADDR:
+
+ /* Just skip if no need to handle ARP cache */
+ if (!local_ipaddr)
+ break;
+
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 102875872bea..a773d1edf7fd 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2722,25 +2722,6 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
}
/**
- * i40iw_get_vector_affinity - report IRQ affinity mask
- * @ibdev: IB device
- * @comp_vector: completion vector index
- */
-static const struct cpumask *i40iw_get_vector_affinity(struct ib_device *ibdev,
- int comp_vector)
-{
- struct i40iw_device *iwdev = to_iwdev(ibdev);
- struct i40iw_msix_vector *msix_vec;
-
- if (iwdev->msix_shared)
- msix_vec = &iwdev->iw_msixtbl[comp_vector];
- else
- msix_vec = &iwdev->iw_msixtbl[comp_vector + 1];
-
- return irq_get_affinity_mask(msix_vec->irq);
-}
-
-/**
* i40iw_init_rdma_device - initialization of iwarp device
* @iwdev: iwarp device
*/
@@ -2832,7 +2813,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
iwibdev->ibdev.post_send = i40iw_post_send;
iwibdev->ibdev.post_recv = i40iw_post_recv;
- iwibdev->ibdev.get_vector_affinity = i40iw_get_vector_affinity;
return iwibdev;
}
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 915374e81948..b5fa4142a536 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -322,7 +322,8 @@ static void qedr_free_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, int sb_id)
{
if (sb_info->sb_virt) {
- dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_CNQ);
dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
}
@@ -558,11 +559,13 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
u32 vector;
+ u16 idx;
int i;
for (i = 0; i < dev->int_info.used_cnt; i++) {
if (dev->int_info.msix_cnt) {
- vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ vector = dev->int_info.msix[idx].vector;
synchronize_irq(vector);
free_irq(vector, &dev->cnq_array[i]);
}
@@ -574,6 +577,7 @@ static void qedr_sync_free_irqs(struct qedr_dev *dev)
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
int i, rc = 0;
+ u16 idx;
if (dev->num_cnq > dev->int_info.msix_cnt) {
DP_ERR(dev,
@@ -583,7 +587,8 @@ static int qedr_req_msix_irqs(struct qedr_dev *dev)
}
for (i = 0; i < dev->num_cnq; i++) {
- rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ rc = request_irq(dev->int_info.msix[idx].vector,
qedr_irq_handler, 0, dev->cnq_array[i].name,
&dev->cnq_array[i]);
if (rc) {
@@ -920,6 +925,16 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
dev->rdma_type = dev_info.rdma_type;
dev->num_hwfns = dev_info.common.num_hwfns;
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
+ rc = dev->ops->iwarp_set_engine_affin(cdev, false);
+ if (rc) {
+ DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
+ goto init_err;
+ }
+ }
+ dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);
+
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
@@ -980,6 +995,10 @@ static void qedr_remove(struct qedr_dev *dev)
qedr_stop_hw(dev);
qedr_sync_free_irqs(dev);
qedr_free_resources(dev);
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
+ dev->ops->iwarp_set_engine_affin(dev->cdev, true);
+
ib_dealloc_device(&dev->ibdev);
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 53bbe6b4e6e6..18778694f652 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -162,6 +162,8 @@ struct qedr_dev {
u32 dp_module;
u8 dp_level;
u8 num_hwfns;
+#define QEDR_IS_CMT(dev) ((dev)->num_hwfns > 1)
+ u8 affin_hwfn_idx;
u8 gsi_ll2_handle;
uint wq_multiplier;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 505fa3648762..0555e5a8c9ed 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -349,7 +349,7 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
default:
DP_NOTICE(dev, "Unknown event received %d\n", params->event);
break;
- };
+ }
return 0;
}
@@ -492,6 +492,8 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int i;
qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
+ if (unlikely(!qp))
+ return -EINVAL;
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index e40d7a62e2c4..29ea779826f4 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -67,7 +67,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
*pkey = QEDR_ROCE_PKEY_DEFAULT;
@@ -2128,7 +2128,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
default:
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_INIT:
switch (new_state) {
@@ -2149,7 +2149,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_RTR:
/* RTR->XXX */
@@ -2162,7 +2162,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_RTS:
/* RTS->XXX */
@@ -2175,7 +2175,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_SQD:
/* SQD->XXX */
@@ -2187,7 +2187,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Invalid state change. */
status = -EINVAL;
break;
- };
+ }
break;
case QED_ROCE_QP_STATE_ERR:
/* ERR->XXX */
@@ -2205,12 +2205,12 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
default:
status = -EINVAL;
break;
- };
+ }
break;
default:
status = -EINVAL;
break;
- };
+ }
return status;
}
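Several hunks above (hns_roce and qedr) also drop a stray semicolon after switch blocks. The extra token is only a harmless null statement, but it trips static checkers and newer compiler warnings. A trivial illustration:

/* No ';' belongs after the closing brace of a switch statement. */
static void demo_switch(int state)
{
	switch (state) {
	case 0:
		break;
	default:
		break;
	}
}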
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index cc429b567d0a..c8d29d3c9468 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -393,7 +393,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (IS_ERR(umem))
return (void *)umem;
- n = umem->nmap;
+ n = ib_umem_num_pages(umem);
mr = __rvt_alloc_mr(n, pd);
if (IS_ERR(mr)) {
@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
- if (mr->mr.length == 0) {
- mr->mr.user_base = addr;
- mr->mr.iova = addr;
- }
-
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ return ret;
}
/**
@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index fc10f595754c..146e932c4a56 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2898,18 +2898,8 @@ again:
goto send_comp;
case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
-
case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
case IB_WR_SEND:
-send:
ret = rvt_get_rwqe(qp, false);
if (ret < 0)
goto op_err;
@@ -2917,6 +2907,22 @@ send:
goto rnr_nak;
if (wqe->length > qp->r_len)
goto inv_err;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp,
+ wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey =
+ wqe->wr.ex.invalidate_rkey;
+ }
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ break;
+ default:
+ break;
+ }
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/drivers/infiniband/sw/rdmavt/trace_cq.h b/drivers/infiniband/sw/rdmavt/trace_cq.h
index df8e1adbef9d..e3c416c6f900 100644
--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
+++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
@@ -105,7 +105,7 @@ DEFINE_EVENT(rvt_cq_template, rvt_create_cq,
TP_ARGS(cq, attr));
#define CQ_PRN \
-"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
+"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
DECLARE_EVENT_CLASS(
rvt_cq_entry_template,
@@ -119,6 +119,8 @@ DECLARE_EVENT_CLASS(
__field(u32, qpn)
__field(u32, length)
__field(u32, idx)
+ __field(u32, flags)
+ __field(u32, imm)
),
TP_fast_assign(
RDI_DEV_ASSIGN(cq->rdi)
@@ -128,6 +130,8 @@ DECLARE_EVENT_CLASS(
__entry->length = wc->byte_len;
__entry->qpn = wc->qp->qp_num;
__entry->idx = idx;
+ __entry->flags = wc->wc_flags;
+ __entry->imm = be32_to_cpu(wc->ex.imm_data);
),
TP_printk(
CQ_PRN,
@@ -137,7 +141,9 @@ DECLARE_EVENT_CLASS(
__entry->status,
__entry->opcode, show_wc_opcode(__entry->opcode),
__entry->length,
- __entry->qpn
+ __entry->qpn,
+ __entry->flags,
+ __entry->imm
)
);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 40e82e0f6c2d..a97d779be2b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -518,8 +518,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
if (unlikely(!skb))
goto out;
- skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+ /* FIXME: hold reference to this netdev until life of this skb. */
skb->dev = ndev;
if (av->network_type == RDMA_NETWORK_IPV4)
skb->protocol = htons(ETH_P_IP);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index cee37bcf4be5..698eba5a176f 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2665,7 +2665,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2675,6 +2674,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
del_gendisk(md->disk);
}
+ mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index fefaccbbd814..84ec3967489c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3187,8 +3187,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 8a1da7e67707..7f8d269dd75a 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -130,6 +130,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
(fs->m_ext.vlan_etype || fs->m_ext.data[1]))
return -EINVAL;
+ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ return -EINVAL;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@@ -330,6 +333,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
+ if (loc >= CFP_NUM_RULES)
+ return -EINVAL;
+
/* Refuse deletion of unused rules, and the default reserved rule */
if (!test_bit(loc, priv->cfp.used) || loc == 0)
return -EINVAL;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 67824faf2cd7..f114da4346ef 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -854,7 +854,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
return UINT64_MAX;
- high = reg;
+ low |= ((u32)reg) << 16;
}
break;
case STATS_TYPE_BANK1:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 5cd3135dfe30..dae66cbed58b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.712.30-0"
+#define DRV_MODULE_VERSION "1.713.36-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index d9057c8bbeef..78326a6c0aba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 1
+#define BCM_5710_FW_REVISION_VERSION 11
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
*/
struct eth_classify_header {
u8 rule_cnt;
- u8 reserved0;
+ u8 warning_on_error;
__le16 reserved1;
__le32 echo;
};
@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
__le32 sge_page_base_hi;
__le16 sge_pause_thr_low;
__le16 sge_pause_thr_high;
+ u8 tpa_over_vlan_disable;
+ u8 reserved[7];
};
@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
u32 upper_bound;
u32 fair_threshold;
u32 fairness_timeout;
- u32 reserved0;
+ u32 size_thr;
};
/*
@@ -5415,7 +5419,9 @@ struct function_start_data {
u8 sd_vlan_force_pri_val;
u8 c2s_pri_tt_valid;
u8 c2s_pri_default;
- u8 reserved2[6];
+ u8 tx_vlan_filtering_enable;
+ u8 tx_vlan_filtering_use_pvid;
+ u8 reserved2[4];
struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
@@ -5448,7 +5454,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
- __le16 reserved0;
+ u8 tx_vlan_filtering_pvid_change_flg;
+ u8 reserved0;
__le32 reserved2;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 46ee2c01f4c5..066765fbef06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
ccd[cos] =
(u32)input_data->cos_min_rate[cos] * 100 *
(T_FAIR_COEF / (8 * 100 * cosWeightSum));
- if (ccd[cos] < pdata->fair_vars.fair_threshold
+ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ MIN_ABOVE_THRESH) {
ccd[cos] =
pdata->fair_vars.fair_threshold +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 98d4c5a3ff21..d581d0ae6584 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
switch (cos_entry) {
case 0:
- nig_reg_adress_crd_weight =
- (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
case 1:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
case 2:
- nig_reg_adress_crd_weight = (port) ?
- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
- pbf_reg_adress_crd_weight = (port) ?
- PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
- break;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
case 3:
- if (port)
+ if (port)
return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
- pbf_reg_adress_crd_weight =
- PBF_REG_COS3_WEIGHT_P0;
- break;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+ break;
case 4:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
- pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
case 5:
- if (port)
- return -EINVAL;
- nig_reg_adress_crd_weight =
- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
- pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
- break;
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
}
REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
@@ -966,7 +962,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri >= max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter Illegal strict priority\n");
- return -EINVAL;
+ return -EINVAL;
}
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
@@ -1845,28 +1841,28 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- /* pause enable/disable */
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED)) {
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
-
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- } else
- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_FLOW_EN);
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -6339,7 +6335,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
- /* else: fall through */
+ /* fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -6478,9 +6474,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* Link is up only if both local phy and external phy are up */
- if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
- return -ESRCH;
+ /* Link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
}
/* In XGXS loopback mode, do not check external PHY */
if (params->loopback_mode == LOOPBACK_XGXS)
@@ -7293,8 +7289,8 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
DP(NETIF_MSG_LINK,
"XAUI workaround has completed\n");
return 0;
- }
- usleep_range(3000, 6000);
+ }
+ usleep_range(3000, 6000);
}
break;
}
@@ -12675,39 +12671,39 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_BMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
+ bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_bmac_enable(params, vars, 1, 1);
+ /* Set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_emac_loopback(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- vars->link_up = 1;
- vars->line_speed = SPEED_1000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_EMAC;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_xgxs_deassert(params);
- /* Set bmac loopback */
- bnx2x_emac_enable(params, vars, 1);
- bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ bnx2x_xgxs_deassert(params);
+ /* Set bmac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
}
static void bnx2x_init_xmac_loopback(struct link_params *params,
@@ -13073,12 +13069,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
}
- if (!CHIP_IS_E3(bp)) {
- bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
- } else {
- bnx2x_set_xmac_rxtx(params, 0);
- bnx2x_set_umac_rxtx(params, 0);
- }
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 336007ca2484..3f6af642fa91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11298,7 +11298,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config2));
- return;
+ return;
}
if (CHIP_IS_E3(bp))
@@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn, mfw_vn;
+ int vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
/*
* Initialize MF configuration
*/
-
bp->mf_ov = 0;
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
- mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -15386,27 +15384,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
return 0;
}
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ u32 param, rule;
int rc;
if (!bp->hwtstamp_ioctl_called)
return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
switch (bp->tx_type) {
case HWTSTAMP_TX_ON:
bp->flags |= TX_TIMESTAMPING_EN;
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
switch (bp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -15420,30 +15438,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15451,10 +15463,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
break;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index a9eaaf3e73a4..7b22a6d8514c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2977,8 +2977,8 @@ static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
cmd_pos->data.macs_num--;
- DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
- cmd_pos->data.macs_num, cnt);
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+ cmd_pos->data.macs_num, cnt);
/* Break if we reached the maximum
* number of rules.
@@ -3597,8 +3597,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
p->mcast_list_len = reg_sz;
- DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
- cmd, p->mcast_list_len);
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
break;
case BNX2X_MCAST_CMD_ADD:
@@ -3735,8 +3735,8 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
- DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
}
*rdata_idx = i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 383db0c3a78b..ac97d76dc36d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2229,7 +2229,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
rc = bnx2x_vf_close(bp, vf);
if (rc)
goto op_err;
- /* Fallthrough to release resources */
+ /* Fall through - to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
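[Editor's note: the fall-through comment change above, like the matching tg3 hunks further down, rewrites the annotation into the bare form that GCC's -Wimplicit-fallthrough comment heuristics are expected to recognize. A rough illustrative sketch of the convention, with hypothetical states and output, not code from this patch:

	#include <stdio.h>

	/* Illustrative only - hypothetical states, not driver code. */
	static void teardown(int state)
	{
		switch (state) {
		case 2:				/* e.g. "reset requested" */
			printf("reset\n");
			/* fall through */
		case 1:				/* e.g. "acquired" */
			printf("free resources\n");
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		teardown(2);	/* prints both lines: the fall through is intentional */
		return 0;
	}
]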
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 8e0a317b31f7..10ff37d6dc78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
bnx2x_sample_bulletin(bp);
if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
rc = -EINVAL;
goto out;
}
@@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
{
int i, j;
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- size_t fsz;
- fsz = tlv->n_mac_vlan_filters *
- sizeof(struct bnx2x_vf_mac_vlan_filter) +
- sizeof(struct bnx2x_vf_mac_vlan_filters);
-
- fl = kzalloc(fsz, GFP_KERNEL);
+ fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
+ GFP_KERNEL);
if (!fl)
return -ENOMEM;
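[Editor's note: the hunk above replaces an open-coded size computation for a structure with a trailing flexible array by struct_size() from <linux/overflow.h>, which performs the multiply-and-add with overflow checking (saturating to SIZE_MAX so the allocation fails rather than being undersized). A minimal sketch of the pattern, using a hypothetical structure, not the driver's actual types:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Hypothetical example - not the driver's structure. */
	struct item {
		unsigned int id;
	};

	struct item_list {
		unsigned int count;
		struct item entries[];		/* flexible array member */
	};

	static struct item_list *alloc_items(unsigned int n)
	{
		struct item_list *l;

		/* sizeof(*l) + n * sizeof(l->entries[0]), overflow-checked */
		l = kzalloc(struct_size(l, entries, n), GFP_KERNEL);
		if (!l)
			return NULL;
		l->count = n;
		return l;
	}
]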
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1f476e69e319..daaca71c9486 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1621,7 +1621,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
bnxt_sched_reset(bp, rxr);
}
- goto next_rx;
+ goto next_rx_no_len;
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1636,6 +1636,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -ENOMEM;
goto next_rx;
}
@@ -1702,12 +1704,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
next_rx:
- rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
-
cpr->rx_packets += 1;
cpr->rx_bytes += len;
+next_rx_no_len:
+ rxr->rx_prod = NEXT_RX(prod);
+ rxr->rx_next_cons = NEXT_RX(cons);
+
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
@@ -5131,10 +5134,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
close_path ? cmpl_ring_id :
@@ -5147,10 +5150,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
close_path ? cmpl_ring_id :
@@ -5169,10 +5172,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id;
- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
@@ -5311,17 +5314,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= rx_rings ?
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
enables |= cp_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5361,14 +5363,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
enables |= tx_rings + ring_grps ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
enables |= ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
@@ -5500,11 +5501,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
- hw_resc->resv_stat_ctxs != stat ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ hw_resc->resv_irqs != nq)
+ return true;
return false;
}
@@ -6337,7 +6340,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
- if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
extra_qps = 65536;
extra_srqs = 8192;
@@ -6745,6 +6748,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ u32 tx_stat_size;
int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6754,13 +6758,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ tx_stat_size = bp->hw_tx_port_stats_ext ?
+ sizeof(*bp->hw_tx_port_stats_ext) : 0;
+ req.tx_stat_size = cpu_to_le16(tx_stat_size);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
- bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = tx_stat_size ?
+ le16_to_cpu(resp->tx_stat_size) / 8 : 0;
} else {
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
@@ -7501,22 +7508,23 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
-int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
int tcs = netdev_get_num_tc(bp->dev);
- bool reinit_irq = false;
+ bool irq_cleared = false;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ if (irq_re_init && BNXT_NEW_RM(bp) &&
+ bnxt_get_num_msix(bp) != bp->total_irqs) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- reinit_irq = true;
+ irq_cleared = true;
}
rc = __bnxt_reserve_rings(bp);
- if (reinit_irq) {
+ if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
@@ -8415,7 +8423,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return rc;
}
}
- rc = bnxt_reserve_rings(bp);
+ rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
if ((bp->flags & BNXT_FLAG_RFS) &&
@@ -8889,8 +8897,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc && vnic->mc_list_count) {
+ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ }
if (rc)
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@@ -10266,7 +10281,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- dflt_rings = netif_get_num_default_rss_queues();
+ dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
/* Reduce default rings on multi-port cards so that total default
* rings do not exceed CPU count.
*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2fb653e0048d..c09b20b08395 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
+#include <linux/crash_dump.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
@@ -1367,7 +1368,8 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- !(bp->flags & BNXT_FLAG_CHIP_P5))
+ !(bp->flags & BNXT_FLAG_CHIP_P5) && \
+ !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
@@ -1776,7 +1778,7 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
-int bnxt_reserve_rings(struct bnxt *bp);
+int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index adabbe94a259..e1460e391952 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -788,7 +788,7 @@ static int bnxt_set_channels(struct net_device *dev,
*/
}
} else {
- rc = bnxt_reserve_rings(bp);
+ rc = bnxt_reserve_rings(bp, true);
}
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index cf475873ce81..bfa342a98d08 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -147,7 +147,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
} else {
- rc = bnxt_reserve_rings(bp);
+ rc = bnxt_reserve_rings(bp, true);
}
}
if (rc) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f37bb7f65606..f1966f0f26a3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -719,7 +719,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -780,7 +780,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
- /* else: fall through */
+ /* fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 88f8a8fa93cd..7b3fc8185ccd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1538,6 +1545,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
struct nicvf *nic = netdev_priv(netdev);
int orig_mtu = netdev->mtu;
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
netdev->mtu = new_mtu;
if (!netif_running(netdev))
@@ -1786,8 +1802,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool bpf_attached = false;
int ret = 0;
- /* For now just support only the usual MTU sized frames */
- if (prog && (dev->mtu > 1500)) {
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
dev->mtu);
return -EOPNOTSUPP;
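[Editor's note: with the usual kernel header values ETH_HLEN = 14 and VLAN_HLEN = 4, the MAX_XDP_MTU limit introduced above works out as follows, so an MTU of up to 1508 keeps a QinQ-tagged frame within the single contiguous buffer XDP requires here:

	MAX_XDP_MTU = 1530 - ETH_HLEN - VLAN_HLEN * 2
	            = 1530 - 14 - 8
	            = 1508
]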
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 7c49681407ad..7c5bfc931128 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init)
{
struct adapter *padap = pdbg_init->adap;
- if (!(padap->flags & FW_OK) || padap->use_bd)
+ if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
return 0;
return 1;
@@ -1229,6 +1229,10 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
+
+ if (rc)
+ return rc;
+
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b7b0eb104430..a8fe0808823d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -606,18 +606,18 @@ struct dentry;
struct work_struct;
enum { /* adapter flags */
- FULL_INIT_DONE = (1 << 0),
- DEV_ENABLED = (1 << 1),
- USING_MSI = (1 << 2),
- USING_MSIX = (1 << 3),
- FW_OK = (1 << 4),
- RSS_TNLALLLOOKUP = (1 << 5),
- USING_SOFT_PARAMS = (1 << 6),
- MASTER_PF = (1 << 7),
- FW_OFLD_CONN = (1 << 9),
- ROOT_NO_RELAXED_ORDERING = (1 << 10),
- SHUTTING_DOWN = (1 << 11),
- SGE_DBQ_TIMER = (1 << 12),
+ CXGB4_FULL_INIT_DONE = (1 << 0),
+ CXGB4_DEV_ENABLED = (1 << 1),
+ CXGB4_USING_MSI = (1 << 2),
+ CXGB4_USING_MSIX = (1 << 3),
+ CXGB4_FW_OK = (1 << 4),
+ CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
+ CXGB4_USING_SOFT_PARAMS = (1 << 6),
+ CXGB4_MASTER_PF = (1 << 7),
+ CXGB4_FW_OFLD_CONN = (1 << 9),
+ CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
+ CXGB4_SHUTTING_DOWN = (1 << 11),
+ CXGB4_SGE_DBQ_TIMER = (1 << 12),
};
enum {
@@ -885,6 +885,7 @@ struct vf_info {
unsigned int tx_rate;
bool pf_set_mac;
u16 vlan;
+ int link_state;
};
enum {
@@ -1574,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc);
int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout);
+ u8 sleep_ok, int timeout);
static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
unsigned int port, struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 99f19370c4e2..647dbb2a17e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -378,19 +378,7 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
QUEREMFLITS_G(p[2]) * 16);
return 0;
}
-
-static int cim_qcfg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cim_qcfg_show, inode->i_private);
-}
-
-static const struct file_operations cim_qcfg_fops = {
- .owner = THIS_MODULE,
- .open = cim_qcfg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cim_qcfg);
static int cimq_show(struct seq_file *seq, void *v, int idx)
{
@@ -860,8 +848,7 @@ static int tx_rate_show(struct seq_file *seq, void *v)
}
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+DEFINE_SHOW_ATTRIBUTE(tx_rate);
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
@@ -893,8 +880,7 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
kfree(incr);
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+DEFINE_SHOW_ATTRIBUTE(cctrl_tbl);
/* Format a value in a unit that differs from the value's native unit by the
* given factor.
@@ -955,8 +941,7 @@ static int clk_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+DEFINE_SHOW_ATTRIBUTE(clk);
/* Firmware Device Log dump. */
static const char * const devlog_level_strings[] = {
@@ -1990,22 +1975,10 @@ static int sensors_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+DEFINE_SHOW_ATTRIBUTE(sensors);
#if IS_ENABLED(CONFIG_IPV6)
-static int clip_tbl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clip_tbl_show, inode->i_private);
-}
-
-static const struct file_operations clip_tbl_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = clip_tbl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(clip_tbl);
#endif
/*RSS Table.
@@ -2208,8 +2181,7 @@ static int rss_config_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+DEFINE_SHOW_ATTRIBUTE(rss_config);
/* RSS Secret Key.
*/
@@ -2628,19 +2600,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
@@ -2660,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
}
/* should never happen! */
- BUG_ON(1);
+ BUG();
return NULL;
}
@@ -3183,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
atomic_read(&t->hash_tids_in_use));
- } else if (adap->flags & FW_OFLD_CONN) {
+ } else if (adap->flags & CXGB4_FW_OFLD_CONN) {
seq_printf(seq, "TID range: %u..%u/%u..%u",
t->aftid_base,
t->aftid_end,
@@ -3233,8 +3193,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+DEFINE_SHOW_ATTRIBUTE(tid_info);
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
@@ -3364,21 +3323,9 @@ static int meminfo_show(struct seq_file *seq, void *v)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(meminfo);
-static int meminfo_open(struct inode *inode, struct file *file)
-{
- return single_open(file, meminfo_show, inode->i_private);
-}
-
-static const struct file_operations meminfo_fops = {
- .owner = THIS_MODULE,
- .open = meminfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int chcr_show(struct seq_file *seq, void *v)
+static int chcr_stats_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
@@ -3399,20 +3346,7 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
-
-
-static int chcr_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, chcr_show, inode->i_private);
-}
-
-static const struct file_operations chcr_stats_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = chcr_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(chcr_stats);
#define PRINT_ADAP_STATS(string, value) \
seq_printf(seq, "%-25s %-20llu\n", (string), \
@@ -3573,8 +3507,7 @@ static int tp_stats_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+DEFINE_SHOW_ATTRIBUTE(tp_stats);
/* Add an array of Debug FS files.
*/
@@ -3603,7 +3536,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_pif_la", &cim_pif_la_fops, 0400, 0 },
{ "cim_ma_la", &cim_ma_la_fops, 0400, 0 },
{ "cim_qcfg", &cim_qcfg_fops, 0400, 0 },
- { "clk", &clk_debugfs_fops, 0400, 0 },
+ { "clk", &clk_fops, 0400, 0 },
{ "devlog", &devlog_fops, 0400, 0 },
{ "mboxlog", &mboxlog_fops, 0400, 0 },
{ "mbox0", &mbox_debugfs_fops, 0600, 0 },
@@ -3621,11 +3554,11 @@ int t4_setup_debugfs(struct adapter *adap)
{ "l2t", &t4_l2t_fops, 0400, 0},
{ "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 },
{ "rss", &rss_debugfs_fops, 0400, 0 },
- { "rss_config", &rss_config_debugfs_fops, 0400, 0 },
+ { "rss_config", &rss_config_fops, 0400, 0 },
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
- { "resources", &resources_debugfs_fops, 0400, 0 },
+ { "resources", &resources_fops, 0400, 0 },
#ifdef CONFIG_CHELSIO_T4_DCB
{ "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
#endif
@@ -3644,18 +3577,18 @@ int t4_setup_debugfs(struct adapter *adap)
{ "obq_ncsi", &cim_obq_fops, 0400, 5 },
{ "tp_la", &tp_la_fops, 0400, 0 },
{ "ulprx_la", &ulprx_la_fops, 0400, 0 },
- { "sensors", &sensors_debugfs_fops, 0400, 0 },
+ { "sensors", &sensors_fops, 0400, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, 0400, 0 },
- { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 },
- { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 },
+ { "tx_rate", &tx_rate_fops, 0400, 0 },
+ { "cctrl", &cctrl_tbl_fops, 0400, 0 },
#if IS_ENABLED(CONFIG_IPV6)
- { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 },
+ { "clip_tbl", &clip_tbl_fops, 0400, 0 },
#endif
- { "tids", &tid_info_debugfs_fops, 0400, 0},
+ { "tids", &tid_info_fops, 0400, 0},
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
- { "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
- { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
+ { "crypto", &chcr_stats_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index 23f43a0f8950..ba95e13d52da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,19 +37,6 @@
#include <linux/export.h>
-#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name##_show, inode->i_private); \
-} \
-static const struct file_operations name##_debugfs_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release \
-}
-
struct t4_debugfs_entry {
const char *name;
const struct file_operations *ops;
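[Editor's note: the debugfs hunks above drop the driver-private DEFINE_SIMPLE_DEBUGFS_FILE() helper in favour of the generic DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the single_open() wrapper and a <name>_fops from a <name>_show() routine. A minimal sketch of how such a node is typically wired up, with a hypothetical "example" name rather than anything from this patch:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	/* Hypothetical debugfs node - for illustration only. */
	static int example_show(struct seq_file *seq, void *v)
	{
		seq_puts(seq, "hello from debugfs\n");
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example);	/* emits example_open() and example_fops */

	static void example_debugfs_init(struct dentry *root)
	{
		debugfs_create_file("example", 0400, root, NULL, &example_fops);
	}
]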
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 65b8dc706c1d..9e589302af90 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int speed)
* Link Mode Mask.
*/
static void fw_caps_to_lmm(enum fw_port_type port_type,
- unsigned int fw_caps,
+ fw_port_cap32_t fw_caps,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
@@ -632,7 +632,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
link_ksettings->link_modes.supported);
- fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
+ fw_caps_to_lmm(pi->port_type,
+ t4_link_acaps(pi->adapter,
+ pi->lport,
+ &pi->link_cfg),
link_ksettings->link_modes.advertising);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
@@ -642,22 +645,6 @@ static int get_link_ksettings(struct net_device *dev,
: SPEED_UNKNOWN);
base->duplex = DUPLEX_FULL;
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -875,7 +862,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
return -EBUSY;
for (i = 0; i < pi->nqsets; ++i) {
@@ -940,7 +927,7 @@ static int get_dbqtimer_tick(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- if (!(adap->flags & SGE_DBQ_TIMER))
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0;
return adap->sge.dbqtimer_tick;
@@ -957,7 +944,7 @@ static int get_dbqtimer(struct net_device *dev)
txq = &adap->sge.ethtxq[pi->first_qset];
- if (!(adap->flags & SGE_DBQ_TIMER))
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0;
/* all of the TX Queues use the same Timer Index */
@@ -979,7 +966,7 @@ static int set_dbqtimer_tick(struct net_device *dev, int usecs)
u32 param, val;
int ret;
- if (!(adap->flags & SGE_DBQ_TIMER))
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0;
/* return early if it's the same Timer Tick we're already using */
@@ -1015,7 +1002,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs)
u32 param, val;
int ret;
- if (!(adap->flags & SGE_DBQ_TIMER))
+ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
return 0;
/* Find the SGE Doorbell Timer Value that's closest to the requested
@@ -1042,7 +1029,7 @@ static int set_dbqtimer(struct net_device *dev, int usecs)
return 0;
for (qix = 0; qix < pi->nqsets; qix++, txq++) {
- if (adap->flags & FULL_INIT_DONE) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
param =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
@@ -1263,7 +1250,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
* firmware image otherwise we'll try to do the entire job from the
* host ... and we always "force" the operation in this path.
*/
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
mbox = adap->mbox;
ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
@@ -1342,7 +1329,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
return 0;
/* Interface must be brought up atleast once */
- if (pi->adapter->flags & FULL_INIT_DONE) {
+ if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
pi->rss[i] = p[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 6c8a62eefe51..33b2c0c45509 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
if (is_t4(adap->params.chip))
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE))
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
return -EINVAL;
dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 7dddb9e748b8..4107007b6ec4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -524,8 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
- : adapter->sge.fw_evtq.abs_id);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -744,16 +743,40 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
void clear_all_filters(struct adapter *adapter)
{
+ struct net_device *dev = adapter->port[0];
unsigned int i;
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
adapter->tids.nsftids;
-
+ /* Clear all TCAM filters */
for (i = 0; i < max_ftid; i++, f++)
if (f->valid || f->pending)
- clear_filter(adapter, f);
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ /* Clear all hash filters */
+ if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
+ struct filter_entry *f;
+ unsigned int sb;
+
+ for (i = adapter->tids.hash_base;
+ i <= adapter->tids.ntids; i++) {
+ f = (struct filter_entry *)
+ adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ for (i = 0; i < sb; i++) {
+ f = (struct filter_entry *)adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
}
}
@@ -1568,9 +1591,8 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx ctx;
int ret;
- /* If we are shutting down the adapter do not wait for completion */
- if (netdev2adap(dev)->flags & SHUTTING_DOWN)
- return __cxgb4_del_filter(dev, filter_id, fs, NULL);
+ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
+ return 0;
init_completion(&ctx.completion);
@@ -1722,12 +1744,13 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
break;
default:
- dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
- __func__, status);
+ if (status != CPL_ERR_TCAM_FULL)
+ dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+ __func__, status);
if (ctx) {
if (status == CPL_ERR_TCAM_FULL)
- ctx->result = -EAGAIN;
+ ctx->result = -ENOSPC;
else
ctx->result = -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4798c7da28b9..c178bce8cf5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -651,12 +651,12 @@ out:
static void disable_msi(struct adapter *adapter)
{
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4_USING_MSIX;
+ } else if (adapter->flags & CXGB4_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4_USING_MSI;
}
}
@@ -672,7 +672,7 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
adap->swintr = 1;
t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
}
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
return IRQ_HANDLED;
}
@@ -837,9 +837,9 @@ static void quiesce_rx(struct adapter *adap)
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
- if (adap->flags & FULL_INIT_DONE) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE) {
t4_intr_disable(adap);
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
free_msix_queue_irqs(adap);
free_irq(adap->msix_info[0].vec, adap);
} else {
@@ -880,7 +880,7 @@ static int setup_fw_sge_queues(struct adapter *adap)
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
@@ -939,7 +939,7 @@ static int setup_sge_queues(struct adapter *adap)
err = t4_sge_alloc_eth_txq(adap, t, dev,
netdev_get_tx_queue(dev, j),
q->rspq.cntxt_id,
- !!(adap->flags & SGE_DBQ_TIMER));
+ !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
if (err)
goto freeout;
}
@@ -2280,7 +2280,7 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto freeq;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
@@ -2293,7 +2293,8 @@ static int cxgb_up(struct adapter *adap)
}
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ (adap->flags & CXGB4_USING_MSI) ? 0
+ : IRQF_SHARED,
adap->port[0]->name, adap);
if (err)
goto irq_err;
@@ -2302,7 +2303,7 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
- adap->flags |= FULL_INIT_DONE;
+ adap->flags |= CXGB4_FULL_INIT_DONE;
mutex_unlock(&uld_mutex);
notify_ulds(adap, CXGB4_STATE_UP);
@@ -2322,6 +2323,8 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
+ struct hash_mac_addr *entry, *tmp;
+
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
@@ -2330,7 +2333,13 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
- adapter->flags &= ~FULL_INIT_DONE;
+
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}
/*
@@ -2344,7 +2353,7 @@ static int cxgb_open(struct net_device *dev)
netif_carrier_off(dev);
- if (!(adapter->flags & FULL_INIT_DONE)) {
+ if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
return err;
@@ -2739,6 +2748,7 @@ static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
ivi->min_tx_rate = 0;
ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
ivi->vlan = vfinfo->vlan;
+ ivi->linkstate = vfinfo->link_state;
return 0;
}
@@ -2878,6 +2888,49 @@ static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
return ret;
}
+
+static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
+ int link)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ u32 param, val;
+ int ret = 0;
+
+ if (vf >= adap->num_vfs)
+ return -EINVAL;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ val = FW_VF_LINK_STATE_AUTO;
+ break;
+
+ case IFLA_VF_LINK_STATE_ENABLE:
+ val = FW_VF_LINK_STATE_ENABLE;
+ break;
+
+ case IFLA_VF_LINK_STATE_DISABLE:
+ val = FW_VF_LINK_STATE_DISABLE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
+ &param, &val);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Error %d in setting PF %d VF %d link state\n",
+ ret, adap->pf, vf);
+ return -EINVAL;
+ }
+
+ adap->vfinfo[vf].link_state = link;
+ return ret;
+}
#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2905,7 +2958,7 @@ static void cxgb_netpoll(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
int i;
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
@@ -2932,7 +2985,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
if (index < 0 || index > pi->nqsets - 1)
return -EINVAL;
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to rate limit on queue %d. Link Down?\n",
index);
@@ -3033,7 +3086,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- if (!(adap->flags & FULL_INIT_DONE)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
pi->port_id);
@@ -3293,12 +3346,13 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = cxgb4_mgmt_open,
- .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
- .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
- .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
- .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
- .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
+ .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
#endif
@@ -4164,7 +4218,7 @@ static int adap_init0(struct adapter *adap)
return ret;
}
if (ret == adap->mbox)
- adap->flags |= MASTER_PF;
+ adap->flags |= CXGB4_MASTER_PF;
/*
* If we're the Master PF Driver and the device is uninitialized,
@@ -4179,7 +4233,7 @@ static int adap_init0(struct adapter *adap)
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
state = DEV_STATE_UNINIT;
- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+ if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
struct fw_hdr *card_fw;
const struct firmware *fw;
@@ -4241,7 +4295,7 @@ static int adap_init0(struct adapter *adap)
ret);
dev_info(adap->pdev_dev, "Coming up as %s: "\
"Adapter already initialized\n",
- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
} else {
dev_info(adap->pdev_dev, "Coming up as MASTER: "\
"Initializing adapter\n");
@@ -4343,7 +4397,7 @@ static int adap_init0(struct adapter *adap)
}
if (!ret)
- adap->flags |= SGE_DBQ_TIMER;
+ adap->flags |= CXGB4_SGE_DBQ_TIMER;
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
@@ -4467,7 +4521,7 @@ static int adap_init0(struct adapter *adap)
* offload connection through firmware work request
*/
if ((val[0] != val[1]) && (ret >= 0)) {
- adap->flags |= FW_OFLD_CONN;
+ adap->flags |= CXGB4_FW_OFLD_CONN;
adap->tids.aftid_base = val[0];
adap->tids.aftid_end = val[1];
}
@@ -4560,7 +4614,7 @@ static int adap_init0(struct adapter *adap)
* 2. Server filter: This are special filters which are used
* to redirect SYN packets to offload queue.
*/
- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+ if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
adap->tids.sftid_base = adap->tids.ftid_base +
DIV_ROUND_UP(adap->tids.nftids, 3);
adap->tids.nsftids = adap->tids.nftids -
@@ -4739,7 +4793,7 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
t4_init_tp_params(adap, true);
return 0;
@@ -4774,7 +4828,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
goto out;
rtnl_lock();
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
spin_lock(&adap->stats_lock);
for_each_port(adap, i) {
@@ -4786,12 +4840,12 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
}
spin_unlock(&adap->stats_lock);
disable_interrupts(adap);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
- if ((adap->flags & DEV_ENABLED)) {
+ if ((adap->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adap->flags &= ~DEV_ENABLED;
+ adap->flags &= ~CXGB4_DEV_ENABLED;
}
out: return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -4809,13 +4863,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
- if (!(adap->flags & DEV_ENABLED)) {
+ if (!(adap->flags & CXGB4_DEV_ENABLED)) {
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Cannot reenable PCI "
"device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- adap->flags |= DEV_ENABLED;
+ adap->flags |= CXGB4_DEV_ENABLED;
}
pci_set_master(pdev);
@@ -4827,7 +4881,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
if (adap_init1(adap, &c))
return PCI_ERS_RESULT_DISCONNECT;
@@ -4939,7 +4993,7 @@ static int cfg_queues(struct adapter *adap)
* at all is problematic ...
*/
niqflint = adap->params.pfres.niqflint - 1;
- if (!(adap->flags & USING_MSIX))
+ if (!(adap->flags & CXGB4_USING_MSIX))
niqflint--;
neq = adap->params.pfres.neq / 2;
avail_eth_qsets = min(niqflint, neq);
@@ -5221,8 +5275,8 @@ static void print_adapter_info(struct adapter *adapter)
/* Software/Hardware configuration */
dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
is_offload(adapter) ? "R" : "",
- ((adapter->flags & USING_MSIX) ? "MSI-X" :
- (adapter->flags & USING_MSI) ? "MSI" : ""),
+ ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
+ (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
is_offload(adapter) ? "Offload" : "non-Offload");
}
@@ -5297,13 +5351,13 @@ static void free_some_resources(struct adapter *adapter)
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int t4_get_chip_type(struct adapter *adap, int ver)
@@ -5601,7 +5655,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* PCI device has been enabled */
- adapter->flags |= DEV_ENABLED;
+ adapter->flags |= CXGB4_DEV_ENABLED;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5619,7 +5673,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* using Relaxed Ordering.
*/
if (!pcie_relaxed_ordering_enabled(pdev))
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+ adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
@@ -5710,7 +5764,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
@@ -5719,9 +5773,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_HW_TLS_RECORD;
}
if (highdma)
@@ -5748,7 +5805,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
@@ -5770,7 +5827,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- if (!(adapter->flags & FW_OK))
+ if (!(adapter->flags & CXGB4_FW_OK))
goto fw_attach_fail;
/* Configure queues and allocate tables now, they can be needed as
@@ -5864,9 +5921,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
+ adapter->flags |= CXGB4_USING_MSIX;
else if (msi > 0 && pci_enable_msi(pdev) == 0) {
- adapter->flags |= USING_MSI;
+ adapter->flags |= CXGB4_USING_MSI;
if (msi > 1)
free_msix_info(adapter);
}
@@ -5934,7 +5991,7 @@ fw_attach_fail:
cxgb4_ptp_init(adapter);
if (IS_REACHABLE(CONFIG_THERMAL) &&
- !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
cxgb4_thermal_init(adapter);
print_adapter_info(adapter);
@@ -5943,7 +6000,7 @@ fw_attach_fail:
out_free_dev:
t4_free_sge_resources(adapter);
free_some_resources(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -5976,7 +6033,12 @@ static void remove_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ clear_all_filters(adapter);
+
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -6006,15 +6068,10 @@ static void remove_one(struct pci_dev *pdev)
if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
- /* If we allocated filters, free up state associated with any
- * valid filters ...
- */
- clear_all_filters(adapter);
-
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adapter);
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4_USING_MSIX)
free_msix_info(adapter);
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
@@ -6038,9 +6095,9 @@ static void remove_one(struct pci_dev *pdev)
#endif
iounmap(adapter->regs);
pci_disable_pcie_error_reporting(pdev);
- if ((adapter->flags & DEV_ENABLED)) {
+ if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
- adapter->flags &= ~DEV_ENABLED;
+ adapter->flags &= ~CXGB4_DEV_ENABLED;
}
pci_release_regions(pdev);
kfree(adapter->mbox_log);
@@ -6066,7 +6123,7 @@ static void shutdown_one(struct pci_dev *pdev)
return;
}
- adapter->flags |= SHUTTING_DOWN;
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
int i;
@@ -6084,7 +6141,7 @@ static void shutdown_one(struct pci_dev *pdev)
disable_msi(adapter);
t4_sge_stop(adapter);
- if (adapter->flags & FW_OK)
+ if (adapter->flags & CXGB4_FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
}
@@ -6112,15 +6169,24 @@ static int __init cxgb4_init_module(void)
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
- debugfs_remove(cxgb4_debugfs_root);
+ goto err_pci;
#if IS_ENABLED(CONFIG_IPV6)
if (!inet6addr_registered) {
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = true;
+ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (ret)
+ pci_unregister_driver(&cxgb4_driver);
+ else
+ inet6addr_registered = true;
}
#endif
+ if (ret == 0)
+ return ret;
+
+err_pci:
+ debugfs_remove(cxgb4_debugfs_root);
+
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter)
int err;
memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_PTP_CMD_PORTID_V(0));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 6167924a7705..c34fb108a689 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->val.ivlan = vlan_tci;
fs->mask.ivlan = vlan_tci_mask;
+ fs->val.ivlan_vld = 1;
+ fs->mask.ivlan_vld = 1;
+
/* Chelsio adapters use ivlan_vld bit to match vlan packets
* as 802.1Q. Also, when vlan tag is present in packets,
* ethtype match is used then to match on ethtype of inner
@@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(struct net_device *dev,
* ethtype value with ethtype of inner header.
*/
if (fs->val.ethtype == ETH_P_8021Q) {
- fs->val.ivlan_vld = 1;
- fs->mask.ivlan_vld = 1;
fs->val.ethtype = 0;
fs->mask.ethtype = 0;
}
@@ -719,11 +720,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
ret = ctx.result;
/* Check if hw returned error for filter creation */
- if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ if (ret)
goto free_entry;
- }
ch_flower->tc_flower_cookie = cls->cookie;
ch_flower->filter_id = ctx.tid;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 77be2584d572..03ed86f9c834 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
unsigned long flags;
spin_lock_irqsave(&bmap->lock, flags);
- __clear_bit(msix_idx, bmap->msix_bmap);
+ __clear_bit(msix_idx, bmap->msix_bmap);
spin_unlock_irqrestore(&bmap->lock, flags);
}
@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap,
per_chan = rxq_info->nrxq / adap->params.nports;
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
msi_idx = 1;
else
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int i, ret = 0;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
sizeof(unsigned short),
GFP_KERNEL);
@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
- if (adap->flags & FULL_INIT_DONE &&
+ if (adap->flags & CXGB4_FULL_INIT_DONE &&
!ret && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
unsigned int cmplqid;
@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
u32 param, cmdop, cmplqid = 0;
int i;
@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
kfree(rxq_info->msix_tbl);
}
@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!rxq_info)
return -ENOMEM;
- if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
i = s->nqs_per_uld;
rxq_info->nrxq = roundup(i, adap->params.nports);
} else {
@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
if (!uld_info->ciq) {
rxq_info->nciq = 0;
} else {
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
rxq_info->nciq = min_t(int, s->nqs_per_uld,
num_online_cpus());
else
@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type
adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type);
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
@@ -673,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->sge_egrstatuspagesize = adap->sge.stat_len;
lld->sge_pktshift = adap->sge.pktshift;
lld->ulp_crypto = adap->params.crypto;
- lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
lld->max_ordird_qp = adap->params.max_ordird_qp;
lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
@@ -702,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
adap->uld[uld].handle = handle;
t4_register_netevent_notifier();
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
@@ -737,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type,
ret = setup_sge_queues_uld(adap, type, p->lro);
if (ret)
goto free_queues;
- if (adap->flags & USING_MSIX) {
+ if (adap->flags & CXGB4_USING_MSIX) {
name_msix_vecs_uld(adap, type);
ret = request_msix_queue_irqs_uld(adap, type);
if (ret)
goto free_rxq;
}
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
if (adap->uld[type].add)
goto free_irq;
@@ -754,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type,
uld_attach(adap, type);
continue;
free_irq:
- if (adap->flags & FULL_INIT_DONE)
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
free_sge_queues_uld(adap, type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 233e15ef496c..cb41c8413eb7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
break;
default:
- BUG_ON(1);
+ BUG();
}
return buf_size;
@@ -3376,7 +3376,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
struct adapter *adap = cookie;
- if (adap->flags & MASTER_PF)
+ if (adap->flags & CXGB4_MASTER_PF)
t4_slow_intr_handler(adap);
process_intrq(adap);
return IRQ_HANDLED;
@@ -3392,7 +3392,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
struct adapter *adap = cookie;
t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
- if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
+ if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
process_intrq(adap))
return IRQ_HANDLED;
return IRQ_NONE; /* probably shared interrupt */
@@ -3407,9 +3407,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
*/
irq_handler_t t4_intr_handler(struct adapter *adap)
{
- if (adap->flags & USING_MSIX)
+ if (adap->flags & CXGB4_USING_MSIX)
return t4_sge_intr_msix;
- if (adap->flags & USING_MSI)
+ if (adap->flags & CXGB4_USING_MSI)
return t4_intr_msi;
return t4_intr_intx;
}
@@ -3442,7 +3442,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
* global Master PF activities like checking for chip ingress stalls,
* etc.
*/
- if (!(adap->flags & MASTER_PF))
+ if (!(adap->flags & CXGB4_MASTER_PF))
goto done;
t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
@@ -3550,7 +3550,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
+ int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
/* Size needs to be multiple of 16, including status entry. */
iq->size = roundup(iq->size, 16);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 82b70a565e24..9a54302bb046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
adap = netdev2adap(dev);
s = adap->srq;
- if (!(adap->flags & FULL_INIT_DONE) || !s)
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
goto out;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3948926f344e..93feb258067b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap)
if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
}
}
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap,
/* If we have version number support, then check to see if the adapter
* already has up-to-date PHY firmware loaded.
*/
- if (phy_fw_version) {
+ if (phy_fw_version) {
new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
if (ret < 0)
@@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
}
+/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
+ * Capabilities which we control with separate controls -- see, for instance,
+ * Pause Frames and Forward Error Correction. In order to determine what the
+ * full set of Advertised Port Capabilities are, the base Advertised Port
+ * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
+ * Port Capabilities associated with those other controls. See
+ * t4_link_acaps() for how this is done.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
FW_PORT_CAP32_ANEG)
@@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
/* Translate Common Code Pause specification into Firmware Port Capabilities */
static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
+ /* Translate orthogonal RX/TX Pause Controls for L1 Configure
+ * commands, etc.
+ */
fw_port_cap32_t fw_pause = 0;
if (cc_pause & PAUSE_RX)
@@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (!(cc_pause & PAUSE_AUTONEG))
fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
+ /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
+	 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
+ * Note that these bits are ignored in L1 Configure commands.
+ */
+ if (cc_pause & PAUSE_RX) {
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
+ else
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ } else if (cc_pause & PAUSE_TX) {
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ }
+
return fw_pause;
}
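
The hunk above maps the driver's orthogonal PAUSE_RX/PAUSE_TX controls onto the IEEE 802.3 PAUSE and ASM_DIR advertisement bits: symmetric pause only when both directions are requested, ASM_DIR for either one-way case. A minimal user-space sketch of that mapping follows; it is illustrative only, and the PAUSE_*/FW_* values are stand-in constants rather than the kernel's definitions.

/* Sketch of the RX/TX -> 802.3 pause-advertisement mapping added above. */
#include <stdio.h>

#define PAUSE_RX          (1u << 0)
#define PAUSE_TX          (1u << 1)
#define FW_802_3_PAUSE    (1u << 0)   /* stand-in for FW_PORT_CAP32_802_3_PAUSE */
#define FW_802_3_ASM_DIR  (1u << 1)   /* stand-in for FW_PORT_CAP32_802_3_ASM_DIR */

static unsigned int cc_pause_to_8023(unsigned int cc_pause)
{
	unsigned int fw = 0;

	if (cc_pause & PAUSE_RX) {
		if (cc_pause & PAUSE_TX)
			fw |= FW_802_3_PAUSE;     /* symmetric pause */
		else
			fw |= FW_802_3_ASM_DIR;   /* RX only: asymmetric */
	} else if (cc_pause & PAUSE_TX) {
		fw |= FW_802_3_ASM_DIR;           /* TX only: asymmetric */
	}
	return fw;
}

int main(void)
{
	printf("none=%u rx=%u tx=%u both=%u\n",
	       cc_pause_to_8023(0),
	       cc_pause_to_8023(PAUSE_RX),
	       cc_pause_to_8023(PAUSE_TX),
	       cc_pause_to_8023(PAUSE_RX | PAUSE_TX));
	return 0;
}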
@@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
}
/**
- * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * t4_link_acaps - compute Link Advertised Port Capabilities
* @adapter: the adapter
- * @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
- * @sleep_ok: if true we may sleep while awaiting command completion
- * @timeout: time to wait for command to finish before timing out
- * (negative implies @sleep_ok=false)
*
- * Set up a port's MAC and PHY according to a desired link configuration.
- * - If the PHY can auto-negotiate first decide what to advertise, then
- * enable/disable auto-negotiation as desired, and reset.
- * - If the PHY does not auto-negotiate just reset it.
- * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
- * otherwise do it later based on the outcome of auto-negotiation.
+ * Synthesize the Advertised Port Capabilities we'll be using based on
+ * the base Advertised Port Capabilities (which have been filtered by
+ * ADVERT_MASK) plus the individual controls for things like Pause
+ * Frames, Forward Error Correction, MDI, etc.
*/
-int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
- unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout)
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc)
{
- unsigned int fw_caps = adapter->params.fw_caps_support;
- fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
- struct fw_port_cmd cmd;
+ fw_port_cap32_t fw_fc, fw_fec, acaps;
unsigned int fw_mdi;
- int ret;
+ char cc_fec;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
@@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- if (lc->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- rcap = lc->acaps | fw_fc | fw_fec;
+ acaps = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else {
- rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
/* Some Requested Port Capabilities are trivially wrong if they exceed
@@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
- if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
- dev_err(adapter->pdev_dev,
- "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
- rcap, lc->pcaps);
+ if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+ dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+ acaps, lc->pcaps);
return -EINVAL;
}
- /* And send that on to the Firmware ...
+ return acaps;
+}
+
+/**
+ * t4_link_l1cfg_core - apply link configuration to MAC/PHY
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ u8 sleep_ok, int timeout)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd cmd;
+ fw_port_cap32_t rcap;
+ int ret;
+
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
+ lc->autoneg == AUTONEG_ENABLE) {
+ return -EINVAL;
+ }
+
+ /* Compute our Requested Port Capabilities and send that on to the
+ * Firmware.
*/
+ rcap = t4_link_acaps(adapter, port, lc);
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
@@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap, -ret);
return ret;
}
- return ret;
+ return 0;
}
/**
@@ -5243,7 +5290,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) && !adap->use_bd;
+ return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
}
/**
@@ -5906,7 +5953,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
{
int i, ofst = idx * 4;
u32 data_reg, mask_reg, cfg;
- u32 multitrc = TRCMULTIFILTER_F;
if (!enable) {
t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
@@ -5926,7 +5972,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
* maximum packet capture size of 9600 bytes is recommended.
* Also in this mode, only trace0 can be enabled and running.
*/
- multitrc = 0;
if (tp->snap_len > 9600 || idx)
return -EINVAL;
}
@@ -6134,7 +6179,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
* ( MPSBGMAP[Port 1] << 8 ) |
* ( MPSBGMAP[Port 0] << 0 ))
*/
- if (adapter->flags & FW_OK) {
+ if (adapter->flags & CXGB4_FW_OK) {
u32 param, val;
int ret;
@@ -7095,10 +7140,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
if (!t4_fw_matches_chip(adap, fw_hdr))
return -EINVAL;
- /* Disable FW_OK flag so that mbox commands with FW_OK flag set
- * wont be sent when we are flashing FW.
+ /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
+	 * set won't be sent when we are flashing FW.
*/
- adap->flags &= ~FW_OK;
+ adap->flags &= ~CXGB4_FW_OK;
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
@@ -7137,7 +7182,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
*/
(void)t4_init_devlog_params(adap);
out:
- adap->flags |= FW_OK;
+ adap->flags |= CXGB4_FW_OK;
return ret;
}
@@ -7208,10 +7253,21 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
+ unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
+ HOSTPAGESIZEPF0_V(sge_hps) |
+ HOSTPAGESIZEPF1_V(sge_hps) |
+ HOSTPAGESIZEPF2_V(sge_hps) |
+ HOSTPAGESIZEPF3_V(sge_hps) |
+ HOSTPAGESIZEPF4_V(sge_hps) |
+ HOSTPAGESIZEPF5_V(sge_hps) |
+ HOSTPAGESIZEPF6_V(sge_hps) |
+ HOSTPAGESIZEPF7_V(sge_hps));
+
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -8694,7 +8750,7 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
struct fw_port_cmd port_cmd;
- unsigned int action, link_ok, speed, mtu;
+ unsigned int action, link_ok, mtu;
fw_port_cap32_t linkattr;
int ret;
@@ -8728,7 +8784,6 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
mtu = FW_PORT_CMD_MTU32_G(
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- speed = fwcap_to_speed(linkattr);
*link_okp = link_ok;
*speedp = fwcap_to_speed(linkattr);
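
The t4_fixup_host_params() hunk above programs SGE_HOST_PAGE_SIZE with sge_hps = page_shift - 10 for every PF, i.e. the field appears to encode the host page size as log2(page_size) - 10. A small worked example of that arithmetic, outside the patch and under that assumption:

/* Worked example of the host-page-size encoding used above (assumes the
 * register field holds log2(page_size) - 10, so 4 KB pages encode as 2).
 */
#include <stdio.h>

static unsigned int fls32(unsigned int x)   /* highest set bit, 1-based */
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int page_size  = 4096;
	unsigned int page_shift = fls32(page_size) - 1;   /* 12 */
	unsigned int sge_hps    = page_shift - 10;         /* 2  */

	printf("page_shift=%u sge_hps=%u\n", page_shift, sge_hps);
	return 0;
}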
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index c62a0c830705..38dd41eb959e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -56,6 +56,7 @@ enum {
CPL_TX_DATA_ISO = 0x1F,
CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_GET_TCB_RPL = 0x22,
CPL_L2T_WRITE_RPL = 0x23,
CPL_PASS_OPEN_RPL = 0x24,
CPL_ACT_OPEN_RPL = 0x25,
@@ -688,6 +689,13 @@ struct cpl_get_tcb {
#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
#define NO_REPLY_F NO_REPLY_V(1U)
+struct cpl_get_tcb_rpl {
+ union opcode_tid ot;
+ __u8 cookie;
+ __u8 status;
+ __be16 len;
+};
+
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index bf7325f6d553..0c5373462ced 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 3297ce025e8b..1b9afb192f7f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -41,6 +41,14 @@
#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
#define TCB_T_FLAGS_W 1
+#define TCB_T_FLAGS_S 0
+#define TCB_T_FLAGS_M 0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_RQ_START_W 30
+#define TCB_RQ_START_S 0
+#define TCB_RQ_START_M 0x3ffffffULL
+#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
@@ -66,4 +74,8 @@
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
+
+#define TF_RX_PDU_OUT_S 49
+#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
+
#endif /* __T4_TCB_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 631f1663f4e0..b2a618e72fcf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1312,6 +1312,14 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+ FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+};
+
+/* Virtual link state as seen by the specified VF */
+enum vf_link_states {
+ FW_VF_LINK_STATE_AUTO = 0x00,
+ FW_VF_LINK_STATE_ENABLE = 0x01,
+ FW_VF_LINK_STATE_DISABLE = 0x02,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 9125ddd89dd1..a02b1dff403e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x16
-#define T4FW_VERSION_MICRO 0x09
+#define T4FW_VERSION_MINOR 0x17
+#define T4FW_VERSION_MICRO 0x03
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x16
-#define T5FW_VERSION_MICRO 0x09
+#define T5FW_VERSION_MINOR 0x17
+#define T5FW_VERSION_MICRO 0x03
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x16
-#define T6FW_VERSION_MICRO 0x09
+#define T6FW_VERSION_MINOR 0x17
+#define T6FW_VERSION_MICRO 0x03
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 26f48a14d2f9..3782e48dada2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -406,11 +406,12 @@ struct adapter {
};
enum { /* adapter flags */
- FULL_INIT_DONE = (1UL << 0),
- USING_MSI = (1UL << 1),
- USING_MSIX = (1UL << 2),
- QUEUES_BOUND = (1UL << 3),
- ROOT_NO_RELAXED_ORDERING = (1UL << 4),
+ CXGB4VF_FULL_INIT_DONE = (1UL << 0),
+ CXGB4VF_USING_MSI = (1UL << 1),
+ CXGB4VF_USING_MSIX = (1UL << 2),
+ CXGB4VF_QUEUES_BOUND = (1UL << 3),
+ CXGB4VF_ROOT_NO_RELAXED_ORDERING = (1UL << 4),
+ CXGB4VF_FW_OK = (1UL << 5),
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b8e0fa59dca2..6d4cf3d0b2f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -155,6 +155,8 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
const char *fc;
const struct port_info *pi = netdev_priv(dev);
+ netif_carrier_on(dev);
+
switch (pi->link_cfg.speed) {
case 100:
s = "100Mbps";
@@ -200,6 +202,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
}
}
@@ -339,16 +342,6 @@ static int link_start(struct net_device *dev)
if (ret == 0)
ret = t4vf_enable_pi(pi->adapter, pi, true, true);
- /* The Virtual Interfaces are connected to an internal switch on the
- * chip which allows VIs attached to the same port to talk to each
- * other even when the port link is down. As a result, we generally
- * want to always report a VI's link as being "up", provided there are
- * no errors in enabling vi.
- */
-
- if (ret == 0)
- netif_carrier_on(dev);
-
return ret;
}
@@ -469,7 +462,7 @@ static void enable_rx(struct adapter *adapter)
* The interrupt queue doesn't use NAPI so we do the 0-increment of
* its Going To Sleep register here to get it started.
*/
- if (adapter->flags & USING_MSI)
+ if (adapter->flags & CXGB4VF_USING_MSI)
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
CIDXINC_V(0) |
SEINTARM_V(s->intrq.intr_params) |
@@ -525,8 +518,8 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
break;
}
cpl = (void *)p;
- /*FALLTHROUGH*/
}
+ /* Fall through */
case CPL_SGE_EGR_UPDATE: {
/*
@@ -613,7 +606,7 @@ static int setup_sge_queues(struct adapter *adapter)
* the intrq's queue ID as the interrupt forwarding queue for the
* subsequent calls ...
*/
- if (adapter->flags & USING_MSI) {
+ if (adapter->flags & CXGB4VF_USING_MSI) {
err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
adapter->port[0], 0, NULL, NULL);
if (err)
@@ -773,7 +766,7 @@ static int adapter_up(struct adapter *adapter)
* adapter setup. Once we've done this, many of our adapter
* parameters can no longer be changed ...
*/
- if ((adapter->flags & FULL_INIT_DONE) == 0) {
+ if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
err = setup_sge_queues(adapter);
if (err)
return err;
@@ -783,17 +776,18 @@ static int adapter_up(struct adapter *adapter)
return err;
}
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4VF_USING_MSIX)
name_msix_vecs(adapter);
- adapter->flags |= FULL_INIT_DONE;
+ adapter->flags |= CXGB4VF_FULL_INIT_DONE;
}
/*
* Acquire our interrupt resources. We only support MSI-X and MSI.
*/
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
- if (adapter->flags & USING_MSIX)
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
+ if (adapter->flags & CXGB4VF_USING_MSIX)
err = request_msix_queue_irqs(adapter);
else
err = request_irq(adapter->pdev->irq,
@@ -824,7 +818,7 @@ static void adapter_down(struct adapter *adapter)
/*
* Free interrupt resources.
*/
- if (adapter->flags & USING_MSIX)
+ if (adapter->flags & CXGB4VF_USING_MSIX)
free_msix_queue_irqs(adapter);
else
free_irq(adapter->pdev->irq, adapter);
@@ -845,6 +839,13 @@ static int cxgb4vf_open(struct net_device *dev)
struct adapter *adapter = pi->adapter;
/*
+ * If we don't have a connection to the firmware there's nothing we
+ * can do.
+ */
+ if (!(adapter->flags & CXGB4VF_FW_OK))
+ return -ENXIO;
+
+ /*
* If this is the first interface that we're opening on the "adapter",
* bring the "adapter" up now.
*/
@@ -1233,7 +1234,7 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
struct sge_eth_rxq *rxq;
int nqsets;
@@ -1478,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
base->duplex = DUPLEX_UNKNOWN;
}
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -1648,7 +1633,7 @@ static int cxgb4vf_set_ringparam(struct net_device *dev,
rp->tx_pending < MIN_TXQ_ENTRIES)
return -EINVAL;
- if (adapter->flags & FULL_INIT_DONE)
+ if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
return -EBUSY;
for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
@@ -1932,6 +1917,8 @@ static void cxgb4vf_get_wol(struct net_device *dev,
* TCP Segmentation Offload flags which we support.
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_link_ksettings = cxgb4vf_get_link_ksettings,
@@ -2163,7 +2150,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
static int sge_queue_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
- ((adapter->flags & USING_MSI) != 0);
+ ((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
@@ -2309,7 +2296,7 @@ static int sge_qstats_show(struct seq_file *seq, void *v)
static int sge_qstats_entries(const struct adapter *adapter)
{
return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
- ((adapter->flags & USING_MSI) != 0);
+ ((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
@@ -2384,19 +2371,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_proc_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/*
* Show Virtual Interfaces.
@@ -2480,7 +2455,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
{ "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
{ "sge_qstats", 0444, &sge_qstats_proc_fops },
- { "resources", 0444, &resources_proc_fops },
+ { "resources", 0444, &resources_fops },
{ "interfaces", 0444, &interfaces_proc_fops },
};
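
The resources debugfs file now relies on DEFINE_SHOW_ATTRIBUTE(resources), which generates the open helper and file_operations that were previously written by hand. Roughly, per the kernel's include/linux/seq_file.h, the macro expands to the following (shown here for reference, not added by the patch):

static int resources_open(struct inode *inode, struct file *file)
{
	return single_open(file, resources_show, inode->i_private);
}

static const struct file_operations resources_fops = {
	.owner   = THIS_MODULE,
	.open    = resources_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};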
@@ -2730,6 +2705,7 @@ static int adap_init0(struct adapter *adapter)
*/
size_nports_qsets(adapter);
+ adapter->flags |= CXGB4VF_FW_OK;
return 0;
}
@@ -2764,7 +2740,8 @@ static void cfg_queues(struct adapter *adapter)
* support. In particular, this means that we need to know what kind
* of interrupts we'll be using ...
*/
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
/*
* Count the number of 10GbE Virtual Interfaces that we have.
@@ -3090,11 +3067,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* using Relaxed Ordering.
*/
if (!pcie_relaxed_ordering_enabled(pdev))
- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+ adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
err = adap_init0(adapter);
if (err)
- goto err_unmap_bar;
+ dev_err(&pdev->dev,
+ "Adapter initialization failed, error %d. Continuing in debug mode\n",
+ err);
/* Initialize hash mac addr list */
INIT_LIST_HEAD(&adapter->mac_hlist);
@@ -3119,13 +3098,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
break;
port_id = ffs(pmask) - 1;
pmask &= ~(1 << port_id);
- viid = t4vf_alloc_vi(adapter, port_id);
- if (viid < 0) {
- dev_err(&pdev->dev, "cannot allocate VI for port %d:"
- " err=%d\n", port_id, viid);
- err = viid;
- goto err_free_dev;
- }
/*
* Allocate our network device and stitch things together.
@@ -3133,7 +3105,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_PORT_QSETS);
if (netdev == NULL) {
- t4vf_free_vi(adapter, viid);
err = -ENOMEM;
goto err_free_dev;
}
@@ -3143,26 +3114,21 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->adapter = adapter;
pi->pidx = pidx;
pi->port_id = port_id;
- pi->viid = viid;
/*
* Initialize the starting state of our "port" and register
* it.
*/
pi->xact_addr_filt = -1;
- netif_carrier_off(netdev);
netdev->irq = pdev->irq;
- netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
- netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 81;
@@ -3173,6 +3139,23 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->dev_port = pi->port_id;
/*
+ * If we haven't been able to contact the firmware, there's
+ * nothing else we can do for this "port" ...
+ */
+ if (!(adapter->flags & CXGB4VF_FW_OK))
+ continue;
+
+ viid = t4vf_alloc_vi(adapter, port_id);
+ if (viid < 0) {
+ dev_err(&pdev->dev,
+ "cannot allocate VI for port %d: err=%d\n",
+ port_id, viid);
+ err = viid;
+ goto err_free_dev;
+ }
+ pi->viid = viid;
+
+ /*
* Initialize the hardware/software state for the port.
*/
err = t4vf_port_init(adapter, pidx);
@@ -3209,7 +3192,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* get MSI interrupts we bail with the error.
*/
if (msi == MSI_MSIX && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
+ adapter->flags |= CXGB4VF_USING_MSIX;
else {
if (msi == MSI_MSIX) {
dev_info(adapter->pdev_dev,
@@ -3229,7 +3212,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
" err=%d\n", err);
goto err_free_dev;
}
- adapter->flags |= USING_MSI;
+ adapter->flags |= CXGB4VF_USING_MSI;
}
/* Now that we know how many "ports" we have and what interrupt
@@ -3259,6 +3242,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
continue;
}
+ netif_carrier_off(netdev);
set_bit(pidx, &adapter->registered_device_map);
}
if (adapter->registered_device_map == 0) {
@@ -3287,8 +3271,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
for_each_port(adapter, pidx) {
dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
adapter->port[pidx]->name,
- (adapter->flags & USING_MSIX) ? "MSI-X" :
- (adapter->flags & USING_MSI) ? "MSI" : "");
+ (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
+ (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : "");
}
/*
@@ -3301,12 +3285,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* so far and return the error.
*/
err_disable_interrupts:
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
err_free_dev:
@@ -3315,13 +3299,13 @@ err_free_dev:
if (netdev == NULL)
continue;
pi = netdev_priv(netdev);
- t4vf_free_vi(adapter, pi->viid);
+ if (pi->viid)
+ t4vf_free_vi(adapter, pi->viid);
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(netdev);
free_netdev(netdev);
}
-err_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -3350,6 +3334,7 @@ err_disable_device:
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
/*
* Tear down driver state associated with device.
@@ -3365,12 +3350,12 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (test_bit(pidx, &adapter->registered_device_map))
unregister_netdev(adapter->port[pidx]);
t4vf_sge_stop(adapter);
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
/*
@@ -3393,13 +3378,19 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
continue;
pi = netdev_priv(netdev);
- t4vf_free_vi(adapter, pi->viid);
+ if (pi->viid)
+ t4vf_free_vi(adapter, pi->viid);
free_netdev(netdev);
}
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
kfree(adapter);
}
@@ -3436,12 +3427,12 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
* Interrupts allowing various internal pathways to drain.
*/
t4vf_sge_stop(adapter);
- if (adapter->flags & USING_MSIX) {
+ if (adapter->flags & CXGB4VF_USING_MSIX) {
pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
+ adapter->flags &= ~CXGB4VF_USING_MSIX;
+ } else if (adapter->flags & CXGB4VF_USING_MSI) {
pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
+ adapter->flags &= ~CXGB4VF_USING_MSI;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f6885313b28f..e8d5e5b99439 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2044,8 +2044,9 @@ static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
*/
irq_handler_t t4vf_intr_handler(struct adapter *adapter)
{
- BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
- if (adapter->flags & USING_MSIX)
+ BUG_ON((adapter->flags &
+ (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
+ if (adapter->flags & CXGB4VF_USING_MSIX)
return t4vf_sge_intr_msix;
else
return t4vf_intr_msi;
@@ -2209,7 +2210,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
- int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
+ int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
/*
* If we're using MSI interrupts and we're not initializing the
@@ -2218,7 +2219,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* the Forwarded Interrupt Queue must be set up before any other
* ingress queue ...
*/
- if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+ if ((adapter->flags & CXGB4VF_USING_MSI) &&
+ rspq != &adapter->sge.intrq) {
iqandst = SGE_INTRDST_IQ;
intr_dest = adapter->sge.intrq.abs_id;
} else
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 84dff74ca9cd..8a389d617a23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
return ret;
}
+/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
+ * mask out bits in the Advertised Port Capabilities which are managed via
+ * separate controls, like Pause Frames and Forward Error Correction. In the
+ * Virtual Function Common Code, since we never perform L1 Configuration on
+ * the Link, the only things we really need to filter out are things which
+ * we decode and report separately like Speed.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+ FW_PORT_CAP32_802_3_PAUSE | \
+ FW_PORT_CAP32_802_3_ASM_DIR | \
+ FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
FW_PORT_CAP32_ANEG)
/**
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
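
The ppm_alloc_cpu_pool() change above sizes the per-cpu bitmap in whole unsigned longs and bails out when fewer than one word's worth of entries is available, instead of rounding a small ppmax up. A standalone sketch of the arithmetic on a 64-bit host (the BITS_PER_TYPE macro below is a stand-in, not the kernel's):

/* Illustrative arithmetic for the rounding above. */
#include <stdio.h>

#define BITS_PER_TYPE(t) (sizeof(t) * 8)

int main(void)
{
	unsigned int ppmax = 100;
	unsigned int bmap  = ppmax / BITS_PER_TYPE(unsigned long);  /* 1 on LP64 */

	if (!bmap) {                     /* fewer than 64 entries: no pool */
		printf("no per-cpu pool\n");
		return 0;
	}
	ppmax = (bmap * sizeof(unsigned long)) << 3;   /* round down to 64 */
	printf("bmap=%u ppmax=%u\n", bmap, ppmax);     /* bmap=1 ppmax=64 */
	return 0;
}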
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 4f68da3b5a2a..6c02e722e72e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -156,6 +156,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
+ select MDIO_DEVICE
imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0b757349067c..591684336882 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2823,7 +2823,7 @@ static int e100_set_features(struct net_device *netdev,
netdev->features = features;
e100_exec_cb(nic, NULL, e100_configure);
- return 0;
+ return 1;
}
static const struct net_device_ops e100_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index b2d01e4b8410..06c9dd333398 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -845,7 +845,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index cd391376036c..21a460eba1f2 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -714,11 +714,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
- if (ret_val)
- return ret_val;
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
@@ -772,11 +777,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 32593885abf0..ee8ca480dc35 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7001,7 +7001,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000e_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -7348,7 +7348,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
- if (pci_dev_run_wake(pdev))
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
+ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 6a6ce2665cd2..9ead83230d2b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1622,14 +1622,12 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
{
struct fm10k_q_vector *q_vector;
struct fm10k_ring *ring;
- int ring_count, size;
+ int ring_count;
ring_count = txr_count + rxr_count;
- size = sizeof(struct fm10k_q_vector) +
- (sizeof(struct fm10k_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
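
fm10k_alloc_q_vector() now computes the allocation size with struct_size(q_vector, ring, ring_count), which covers the struct plus its trailing flexible array and, in the kernel, saturates on overflow. A rough user-space analogue with hypothetical struct fields, just to show the size calculation:

/* Rough user-space analogue of the struct_size() call above; the kernel
 * helper additionally guards against multiplication overflow.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };
struct q_vector {
	int napi;
	struct ring ring[];              /* flexible array member */
};

#define struct_size_sketch(p, member, count) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (count))

int main(void)
{
	struct q_vector *qv;
	size_t n = 4;

	qv = calloc(1, struct_size_sketch(qv, ring, n));   /* sizeof args not evaluated */
	if (!qv)
		return 1;
	printf("allocated %zu bytes\n", struct_size_sketch(qv, ring, n));
	free(qv);
	return 0;
}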
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index bee192fe2ffb..cf4fa2a56e4e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1165,7 +1165,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* @results: Pointer array to message, results[0] is pointer to message
* @mbx: Pointer to mailbox information structure
*
- * This function is a default handler for MSI-X requests from the VF. The
+ * This function is a default handler for MSI-X requests from the VF. The
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index d02fc6319452..150375a81b6c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,6 +44,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
+ i40e_ddp.o \
i40e_client.o \
i40e_virtchnl_pf.o \
i40e_xsk.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2963f7e058e7..c7aba90a352f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -57,6 +57,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
@@ -171,6 +172,7 @@ enum i40e_state_t {
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
+ __I40E_RECOVERY_MODE,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -343,6 +345,29 @@ struct i40e_udp_port_config {
u8 filter_index;
};
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+ u32 p_count;
+ struct i40e_profile_info p_info[0];
+};
+
+struct i40e_ddp_old_profile_list {
+ struct list_head list;
+ size_t old_ddp_size;
+ u8 old_ddp_buf[0];
+};
+
/* macros related to FLX_PIT */
#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
@@ -546,6 +571,8 @@ struct i40e_pf {
#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
+#define I40E_FLAG_RS_FEC BIT(25)
+#define I40E_FLAG_BASE_R_FEC BIT(26)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -609,6 +636,8 @@ struct i40e_pf {
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
+ struct timespec64 ptp_prev_hw_time;
+ ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
@@ -630,6 +659,8 @@ struct i40e_pf {
u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
+ /* List to keep previous DDP profiles to be rolled back in the future */
+ struct list_head ddp_old_prof;
};
/**
@@ -811,10 +842,7 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
- /* AF_XDP zero-copy */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1106,6 +1134,8 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
@@ -1114,25 +1144,13 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
- return NULL;
-
- return ring->vsi->xsk_umems[qid];
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c1a016267a09..969c7bf0e141 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -631,6 +631,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 8))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -772,7 +777,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 4facaee6731f..fcb28bebefb7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,8 +34,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0006
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x0008
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -284,6 +284,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -1910,6 +1911,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
+ I40E_PHY_TYPE_5GBASE_T = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
@@ -1951,19 +1954,25 @@ enum i40e_aq_phy_type {
BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
- BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
@@ -2009,6 +2018,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2521,18 +2532,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
-
struct i40e_aqc_lldp_start {
u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
u8 reserved[15];
};
@@ -2656,6 +2668,16 @@ struct i40e_aqc_lldp_stop_start_specific_agent {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index dd65a54cc52f..a586731cfea3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -601,11 +601,9 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_hw *hw = &pf->hw;
struct i40e_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
- u32 size;
- size = sizeof(struct i40e_qvlist_info) +
- (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
- ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 76d4b678247b..6e019d94bcd4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -51,10 +51,14 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_B:
+ case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -1172,6 +1176,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1489,7 +1495,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1502,21 +1507,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1536,7 +1526,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1550,22 +1539,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
-
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -3680,14 +3653,54 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
+ * i40e_aq_restore_lldp
+ * @hw: pointer to the hw struct
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Restore LLDP Agent factory settings if @restore is set to true. Otherwise,
+ * only return the factory setting in the AQ response.
+ **/
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
+ i40e_status status;
+
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
+
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (setting)
+ *setting = cmd->command & 1;
+
+ return status;
+}
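A minimal usage sketch of the new wrapper, assuming a probed struct i40e_hw; the calling context is illustrative and not taken from this patch:

        u8 factory_setting = 0;
        i40e_status ret;

        /* Restore the agent to its factory default; bit 0 of the AQ
         * response is copied back into factory_setting by the wrapper.
         */
        ret = i40e_aq_restore_lldp(hw, &factory_setting, true, NULL);
        if (ret == I40E_ERR_DEVICE_NOT_SUPPORTED)
                i40e_debug(hw, I40E_DEBUG_ALL, "no persistent LLDP support\n");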
+
+/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent
**/
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3700,6 +3713,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -3709,13 +3730,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
* @buff: buffer for result
+ * @persist: True if start of LLDP should be persistent across power cycles
* @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
@@ -3725,6 +3747,15 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
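A short, hedged sketch of driving both new persist parameters together; the hw pointer and the decision to persist are assumptions made for illustration:

        bool persist = !!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT);

        /* With persist set the stop survives power cycles; otherwise the
         * helpers above log a debug message and fall back to a plain stop.
         */
        i40e_aq_stop_lldp(hw, true, persist, NULL);
        /* ... reconfigure DCB/LLDP handling ... */
        i40e_aq_start_lldp(hw, persist, NULL);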
@@ -4896,6 +4927,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5471,6 +5503,163 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
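The macro above packs dense pointer arithmetic; the following hypothetical helper (not part of the patch) spells out the same walk — the NVM word count sits right after the device table, followed by the NVM words and then the section table:

static struct i40e_section_table *
i40e_profile_section_table(struct i40e_profile_segment *profile)
{
        u32 count = profile->device_table_count;
        /* nvm points just past the device table; nvm[0] holds the number
         * of NVM words that follow, and the section table comes after them.
         */
        u32 *nvm = (u32 *)&profile->device_table[count];

        return (struct i40e_section_table *)&nvm[nvm[0] + 1];
}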
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches the i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it returns
+ * NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= cpu_to_le16(aq->flags);
+ memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
+
+ return 0;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -5486,47 +5675,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
i40e_status status = 0;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- dev_cnt = profile->device_table_count;
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ I40E_SECTION_TABLE(profile, sec_tbl);
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index e281b89335b9..5f0cb9e0e7a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -886,22 +886,23 @@ out:
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw)
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
if (!hw->func_caps.dcb)
- return ret;
+ return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
if (ret)
- return ret;
+ return I40E_ERR_NOT_READY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -910,7 +911,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return ret;
+ return I40E_ERR_NOT_READY;
}
/* Get DCBX status */
@@ -919,26 +920,19 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
return ret;
/* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
if (ret)
return ret;
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
}
/* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
- if (ret)
- return ret;
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 92d01042c1f6..fcc2d3607143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -147,5 +147,5 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644
index 000000000000..5e08f100c413
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks if DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * Checks if DDP profiles are equivalent.
+ * Returns true if profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+ struct i40e_profile_info *b)
+{
+ return a->track_id == b->track_id &&
+ !memcmp(&a->version, &b->version, sizeof(a->version)) &&
+ !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if a DDP profile is already loaded
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * Checks if the DDP profile is already loaded.
+ * Returns >0 if the profile exists.
+ * Returns 0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -1;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * Checks if DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+ struct i40e_profile_info *old)
+{
+ unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
+ /* 0x00 group must be only the first */
+ if (group_id_new == 0)
+ return true;
+ /* 0xFF group is compatible with anything else */
+ if (group_id_new == 0xFF || group_id_old == 0xFF)
+ return false;
+ /* otherwise only profiles from the same group are compatible */
+ return group_id_old != group_id_new;
+}
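A small worked example of the group rule above, using made-up track_id values:

        /* Hypothetical value: bits 23:16 of track_id carry the group ID */
        u32 track_id = 0x80120003;
        u8 group_id = (track_id & 0x00FF0000) >> 16;    /* == 0x12 */

        /* Two group-0x12 profiles do not overlap; a group-0x00 profile may
         * only be the first one loaded; group 0xFF coexists with anything.
         */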
+
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP profile overlaps with existing one
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * Checks if the DDP profile overlaps with an existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns 0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -EIO;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_overlap(pinfo,
+ &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_add_pinfo
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+static enum i40e_status_code
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static enum i40e_status_code
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package as a size_t
+ *
+ * Checks correctness of the pkg header: version, size too big/small, and
+ * the alignment and boundaries of all segment offsets. This allows a
+ * non-DDP profile file loaded by administrator mistake to be rejected.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+ struct i40e_package_header *pkg_hdr,
+ size_t size_huge)
+{
+ u32 size = 0xFFFFFFFFU & size_huge;
+ u32 pkg_hdr_size;
+ u32 segment;
+
+ if (!pkg_hdr)
+ return false;
+
+ if (pkg_hdr->version.major > 0) {
+ struct i40e_ddp_version ver = pkg_hdr->version;
+
+ netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+ ver.major, ver.minor, ver.update, ver.draft);
+ return false;
+ }
+ if (size_huge > size) {
+ netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+ return false;
+ }
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP profile - size is too small.");
+ return false;
+ }
+
+ pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+ if (size < pkg_hdr_size) {
+ netdev_err(netdev, "Invalid DDP profile - too many segments");
+ return false;
+ }
+ for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+ u32 offset = pkg_hdr->segment_offset[segment];
+
+ if (0xFU & offset) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment alignment",
+ segment);
+ return false;
+ }
+ if (pkg_hdr_size > offset || offset >= size) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment offset",
+ segment);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads the DDP profile to the NIC. The function is
+ * also used for rolling back a previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add)
+{
+ u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info)];
+ struct i40e_metadata_segment *metadata_hdr;
+ struct i40e_profile_segment *profile_hdr;
+ struct i40e_profile_info pinfo;
+ struct i40e_package_header *pkg_hdr;
+ i40e_status status;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 track_id;
+ int istatus;
+
+ pkg_hdr = (struct i40e_package_header *)data;
+ if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+ return -EINVAL;
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP recipe size.");
+ return -EINVAL;
+ }
+
+ /* Find beginning of segment data in buffer */
+ metadata_hdr = (struct i40e_metadata_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (!metadata_hdr) {
+ netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ track_id = metadata_hdr->track_id;
+ profile_hdr = (struct i40e_profile_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ if (!profile_hdr) {
+ netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ pinfo.track_id = track_id;
+ pinfo.version = profile_hdr->version;
+ if (is_add)
+ pinfo.op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+ memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+ /* Check if profile data already exists*/
+ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (is_add) {
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile already loaded.");
+ return -EINVAL;
+ }
+ istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile overlaps with existing one.");
+ return -EINVAL;
+ }
+ } else {
+ if (istatus == 0) {
+ netdev_err(netdev,
+ "DDP profile for deletion does not exist.");
+ return -EINVAL;
+ }
+ }
+
+ /* Load profile data */
+ if (is_add) {
+ status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+ netdev_err(netdev,
+ "Profile is not supported by the device.");
+ return -EPERM;
+ }
+ netdev_err(netdev, "Failed to write DDP profile.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to remove DDP profile.");
+ return -EIO;
+ }
+ }
+
+ /* Add/remove profile to/from profile list in FW */
+ if (is_add) {
+ status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to add DDP profile info.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to restore DDP profile info.");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores the previously loaded profile stored on the list in driver memory.
+ * After rolling back, removes the entry from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+ struct i40e_ddp_old_profile_list *entry;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ int status = 0;
+
+ if (!list_empty(&pf->ddp_old_prof)) {
+ entry = list_first_entry(&pf->ddp_old_prof,
+ struct i40e_ddp_old_profile_list,
+ list);
+ status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+ entry->old_ddp_size, false);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return status;
+}
+
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *ddp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int status = 0;
+
+ /* Check for valid region first */
+ if (flash->region != I40_DDP_FLASH_REGION) {
+ netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+ return -EINVAL;
+ }
+ if (pf->hw.bus.func != 0) {
+ netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
+ return -EINVAL;
+ }
+
+ /* If the user supplied "-" instead of a file name, roll back the
+ * previously stored profile.
+ */
+ if (strncmp(flash->data, "-", 2) != 0) {
+ struct i40e_ddp_old_profile_list *list_entry;
+ char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+ + I40E_DDP_PROFILE_NAME_MAX];
+
+ profile_name[sizeof(profile_name) - 1] = 0;
+ strncpy(profile_name, I40E_DDP_PROFILE_PATH,
+ sizeof(profile_name) - 1);
+ strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+ /* Load DDP recipe. */
+ status = request_firmware(&ddp_config, profile_name,
+ &netdev->dev);
+ if (status) {
+ netdev_err(netdev, "DDP recipe file request failed.");
+ return status;
+ }
+
+ status = i40e_ddp_load(netdev, ddp_config->data,
+ ddp_config->size, true);
+
+ if (!status) {
+ list_entry =
+ kzalloc(sizeof(struct i40e_ddp_old_profile_list) +
+ ddp_config->size, GFP_KERNEL);
+ if (!list_entry) {
+ netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+ netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+ } else {
+ memcpy(list_entry->old_ddp_buf,
+ ddp_config->data, ddp_config->size);
+ list_entry->old_ddp_size = ddp_config->size;
+ list_add(&list_entry->list, &pf->ddp_old_prof);
+ }
+ }
+
+ release_firmware(ddp_config);
+ } else {
+ if (!list_empty(&pf->ddp_old_prof)) {
+ status = i40e_ddp_restore(pf);
+ } else {
+ netdev_warn(netdev, "There is no DDP profile to restore.");
+ status = -ENOENT;
+ }
+ }
+ return status;
+}
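A hedged sketch of how the new flash_device hook is reached: user space issues the ETHTOOL_FLASHDEV ioctl (ethtool -f), and the region must match I40_DDP_FLASH_REGION from the driver headers. The file name below is invented for illustration:

        struct ethtool_flash flash = {
                .cmd    = ETHTOOL_FLASHDEV,
                .region = I40_DDP_FLASH_REGION,
        };

        /* Load a DDP recipe from the firmware search path, or pass "-" to
         * roll back the previously stored profile.
         */
        strscpy(flash.data, "ddp/example_profile.pkg", sizeof(flash.data));
        i40e_ddp_flash(netdev, &flash);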
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f681213db1cc..abdae22ed1a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1344,7 +1344,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
- ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Stop LLDP AQ command failed =0x%x\n",
@@ -1381,7 +1381,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* Continue and start FW LLDP anyways */
}
- ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Start LLDP AQ command failed =0x%x\n",
@@ -1665,30 +1665,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
- if (cnt != 1) {
- dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
- goto netdev_ops_write_done;
- }
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "tx_timeout: VSI %d not found\n", vsi_seid);
- } else if (!vsi->netdev) {
- dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
- vsi_seid);
- } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
- dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
- vsi_seid);
- } else if (rtnl_trylock()) {
- vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
- rtnl_unlock();
- dev_info(&pf->pdev->dev, "tx_timeout called\n");
- } else {
- dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
- }
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
@@ -1756,7 +1733,6 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
dev_info(&pf->pdev->dev, "available commands\n");
- dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 8e46098bad57..5400c824a821 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -28,6 +28,8 @@
#define _I40E_DEVIDS_H_
/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
@@ -41,6 +43,9 @@
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#define I40E_DEV_ID_10G_B 0x104F
+#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 57ff130e9b4c..56faacce19c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -461,6 +461,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
+ I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -529,6 +531,20 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
@@ -556,17 +572,23 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -629,6 +651,24 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
/* need to add new 10G PHY types */
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
@@ -671,13 +711,15 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
@@ -723,14 +765,20 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_40GBASE_AOC:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_SR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
break;
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
@@ -744,6 +792,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -774,12 +829,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
10000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
@@ -787,6 +848,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
@@ -848,6 +915,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -861,6 +931,10 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -878,6 +952,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -885,9 +966,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
-
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -922,6 +1009,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_LINK_SPEED_10GB:
ks->base.speed = SPEED_10000;
break;
+ case I40E_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ ks->base.speed = SPEED_2500;
+ break;
case I40E_LINK_SPEED_1GB:
ks->base.speed = SPEED_1000;
break;
@@ -1009,6 +1102,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
break;
case I40E_MEDIA_TYPE_FIBER:
ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
ks->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
@@ -1207,6 +1301,12 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
10000baseLR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_2_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
20000baseKR2_Full))
config.link_speed |= I40E_LINK_SPEED_20GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -1284,6 +1384,154 @@ done:
return err;
}
+static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ u32 flags = 0;
+ int err = 0;
+
+ flags = READ_ONCE(pf->flags);
+ i40e_set_fec_in_flags(fec_cfg, &flags);
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) {
+ struct i40e_aq_set_phy_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status) {
+ netdev_info(netdev,
+ "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ err = -EAGAIN;
+ goto done;
+ }
+ pf->flags = flags;
+ status = i40e_update_link_info(hw);
+ if (status)
+ /* debug level message only due to relation to the link
+ * itself rather than to the FEC settings
+ * (e.g. no physical connection etc.)
+ */
+ netdev_dbg(netdev,
+ "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+
+done:
+ return err;
+}
+
+static int i40e_get_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ int err = 0;
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ fecparam->fec = 0;
+ if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_ABILITY_RS))
+ fecparam->fec |= ETHTOOL_FEC_RS;
+ if ((abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+ if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+done:
+ return err;
+}
+
+static int i40e_set_fec_param(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 fec_cfg = 0;
+ int err = 0;
+
+ if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
+ hw->device_id != I40E_DEV_ID_25G_B) {
+ err = -EPERM;
+ goto done;
+ }
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ break;
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_NONE:
+ fec_cfg = 0;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ err = -EINVAL;
+ goto done;
+ }
+
+ err = i40e_set_fec_cfg(netdev, fec_cfg);
+
+done:
+ return err;
+}
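A brief, assumed usage path for the handler above: user space reaches it through the ETHTOOL_SFECPARAM ioctl (ethtool --set-fec <dev> encoding rs); the snippet mirrors that request:

        struct ethtool_fecparam fp = {
                .cmd = ETHTOOL_SFECPARAM,
                .fec = ETHTOOL_FEC_RS,          /* request Reed-Solomon FEC */
        };

        /* On a 25G part this maps to I40E_AQ_SET_FEC_REQUEST_RS |
         * I40E_AQ_SET_FEC_ABILITY_RS; other device IDs get -EPERM above.
         */
        i40e_set_fec_param(netdev, &fp);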
+
static int i40e_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
@@ -1399,7 +1647,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.requested_mode = I40E_FC_NONE;
else
- return -EINVAL;
+ return -EINVAL;
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
@@ -2198,7 +2446,7 @@ static int i40e_get_ts_info(struct net_device *dev,
return 0;
}
-static int i40e_link_test(struct net_device *netdev, u64 *data)
+static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2221,7 +2469,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_reg_test(struct net_device *netdev, u64 *data)
+static u64 i40e_reg_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2232,7 +2480,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
+static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2246,7 +2494,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
return *data;
}
-static int i40e_intr_test(struct net_device *netdev, u64 *data)
+static u64 i40e_intr_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -2462,10 +2710,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
default:
break;
}
- if (ret)
- return -ENOENT;
- else
- return 0;
+ if (ret)
+ return -ENOENT;
+ else
+ return 0;
}
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
@@ -4698,6 +4946,15 @@ flags_complete:
}
}
+ if (((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Now that we've checked to ensure that the new flags are valid, load
* them into place. Since we only modify flags either (a) during
* initialization or (b) while holding the RTNL lock, we don't need
@@ -4736,6 +4993,24 @@ flags_complete:
}
}
+ if ((changed_flags & I40E_FLAG_RS_FEC) ||
+ (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ u8 fec_cfg = 0;
+
+ if (pf->flags & I40E_FLAG_RS_FEC &&
+ pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = I40E_AQ_SET_FEC_AUTO;
+ } else if (pf->flags & I40E_FLAG_RS_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS);
+ } else if (pf->flags & I40E_FLAG_BASE_R_FEC) {
+ fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR);
+ }
+ if (i40e_set_fec_cfg(dev, fec_cfg))
+ dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
+ }
+
if ((changed_flags & pf->flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4746,7 +5021,7 @@ flags_complete:
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
/* reset local_dcbx_config to default */
dcbcfg = &pf->hw.local_dcbx_config;
@@ -4761,7 +5036,7 @@ flags_complete:
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
- i40e_aq_start_lldp(&pf->hw, NULL);
+ i40e_aq_start_lldp(&pf->hw, false, NULL);
}
}
@@ -4929,6 +5204,12 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .set_eeprom = i40e_set_eeprom,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+};
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
@@ -4970,9 +5251,18 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_per_queue_coalesce = i40e_set_per_queue_coalesce,
.get_link_ksettings = i40e_get_link_ksettings,
.set_link_ksettings = i40e_set_link_ksettings,
+ .get_fecparam = i40e_get_fec_param,
+ .set_fecparam = i40e_set_fec_param,
+ .flash_device = i40e_ddp_flash,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &i40e_ethtool_ops;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ netdev->ethtool_ops = &i40e_ethtool_ops;
+ else
+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 53ef89da1e46..ab16fb126d53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -49,8 +49,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 7
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 8
+#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -69,6 +69,10 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
@@ -92,6 +96,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -100,6 +106,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
@@ -301,8 +309,9 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if ((!test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -2130,11 +2139,22 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
- set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
- dev_warn(&vsi->back->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err),
- vsi_name);
+ if (vsi->type == I40E_VSI_MAIN) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err), vsi_name);
+ } else if (vsi->type == I40E_VSI_SRIOV ||
+ vsi->type == I40E_VSI_VMDQ1 ||
+ vsi->type == I40E_VSI_VMDQ2) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ } else {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ }
}
}
@@ -2677,6 +2697,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2707,6 +2731,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -2972,9 +3000,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
- i40e_vlan_stripping_disable(vsi);
-
vsi->info.pvid = 0;
+
+ i40e_vlan_stripping_disable(vsi);
}
/**
@@ -3087,6 +3115,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and ZC are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
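A quick worked example of the queue-index adjustment above, under the assumption the subtraction encodes (XDP Tx rings laid out after the regular Tx rings): with vsi->alloc_queue_pairs == 8, the XDP ring carrying queue_index 9 maps back to qid 1, so the zero-copy bit in af_xdp_zc_qps and the UMEM returned by xdp_get_umem_from_qid() are both looked up for queue pair 1 rather than a non-existent queue 9.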
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -3636,7 +3684,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
(I40E_QUEUE_TYPE_TX
<< I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
@@ -4003,7 +4051,8 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
}
@@ -6406,7 +6455,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
/* Get the initial DCB configuration */
- err = i40e_init_dcb(hw);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -6496,6 +6545,12 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
+ case I40E_LINK_SPEED_5GB:
+ speed = "5 G";
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ speed = "2.5 G";
+ break;
case I40E_LINK_SPEED_1GB:
speed = "1000 M";
break;
@@ -6849,10 +6904,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
struct i40e_pf *pf = vsi->back;
u8 enabled_tc = 0, num_tc, hw;
bool need_reset = false;
+ int old_queue_pairs;
int ret = -EINVAL;
u16 mode;
int i;
+ old_queue_pairs = vsi->num_queue_pairs;
num_tc = mqprio_qopt->qopt.num_tc;
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
@@ -6953,6 +7010,7 @@ config_tc:
}
ret = i40e_configure_queue_channels(vsi);
if (ret) {
+ vsi->num_queue_pairs = old_queue_pairs;
netdev_info(netdev,
"Failed configuring queue channels\n");
need_reset = true;
@@ -8162,8 +8220,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
- set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
exit:
@@ -9335,6 +9393,11 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"shutdown_lan_hmc failed: %d\n", ret);
}
+
+ /* Save the current PTP time so that we can restore the time after the
+ * reset completes.
+ */
+ i40e_ptp_save_hw_time(pf);
}
/**
@@ -9427,6 +9490,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
@@ -9434,7 +9498,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
u32 val;
int v;
- if (test_bit(__I40E_DOWN, pf->state))
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+ i40e_check_recovery_mode(pf)) {
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ }
+
+ if (test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !old_recovery_mode_bit)
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -9463,6 +9534,44 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
+ /* if we are going out of or into recovery mode we have to act
+ * accordingly with regard to resource initialization
+ * and deinitialization
+ */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
+ old_recovery_mode_bit) {
+ if (i40e_get_capabilities(pf,
+ i40e_aqc_opc_list_func_capabilities))
+ goto end_unlock;
+
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ /* we're staying in recovery mode so we'll reinitialize
+ * misc vector here
+ */
+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
+ goto end_unlock;
+ } else {
+ if (!lock_acquired)
+ rtnl_lock();
+ /* we're going out of recovery mode so we'll free
+ * the IRQ allocated specifically for recovery mode
+ * and restore the interrupt scheme
+ */
+ free_irq(pf->pdev->irq, pf);
+ i40e_clear_interrupt_scheme(pf);
+ if (i40e_restore_interrupt_scheme(pf))
+ goto end_unlock;
+ }
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* bail out in case recovery mode was detected, as there is
+ * no need for further configuration.
+ */
+ goto end_unlock;
+ }
+
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (ret)
@@ -9714,7 +9823,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
bool mdd_detected = false;
- bool pf_mdd_detected = false;
struct i40e_vf *vf;
u32 reg;
int i;
@@ -9760,19 +9868,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF, initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
- i40e_service_event_schedule(pf);
+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
}
}
@@ -9785,6 +9886,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
@@ -9793,11 +9897,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
- }
-
- if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
@@ -9924,31 +10023,38 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_SUSPENDED, pf->state))
return;
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
- i40e_sync_filters_subtask(pf);
- i40e_reset_subtask(pf);
- i40e_handle_mdd_event(pf);
- i40e_vc_process_vflr_event(pf);
- i40e_watchdog_subtask(pf);
- i40e_fdir_reinit_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
- /* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_sync_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
+ true);
+ } else {
+ i40e_client_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+ pf->state))
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ }
+ i40e_sync_filters_subtask(pf);
+ i40e_sync_udp_filters_subtask(pf);
} else {
- i40e_client_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
- pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
- }
- i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ }
+
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
@@ -10129,6 +10235,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
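The af_xdp_zc_qps bitmap allocated here replaces the per-VSI xsk_umems array that this patch removes from i40e_xsk.c further below: the driver now only tracks which queue pairs run in zero-copy mode and asks the XDP core for the UMEM itself. Condensed from the later hunks in this diff, the lifecycle is roughly:

	/* i40e_xsk_umem_enable():  */ set_bit(qid, vsi->af_xdp_zc_qps);
	/* i40e_xsk_umem_disable(): */ clear_bit(qid, vsi->af_xdp_zc_qps);
	/* i40e_xsk_umem():         */ umem = xdp_get_umem_from_qid(vsi->netdev, qid);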
@@ -10147,6 +10259,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10227,6 +10340,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
@@ -10760,6 +10874,48 @@ err_unwind:
}
/**
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
+ * non queue events in recovery mode
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
+ * This is handled differently from the normal (non-recovery) case since no
+ * Tx/Rx resources are being allocated in recovery mode.
+ **/
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
+{
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI-X misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+
+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
+ pf->int_name, pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI/legacy misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ i40e_enable_misc_int_causes(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
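Note that in the non-MSI-X leg the IRQ is requested directly on pf->pdev->irq; the matching teardown is the guarded free_irq() this patch adds to i40e_remove() and i40e_shutdown() further down:

	/* as added to i40e_remove()/i40e_shutdown() later in this diff */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);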
+/**
* i40e_setup_misc_vector - Setup the misc vector to handle non queue events
* @pf: board private structure
*
@@ -11070,6 +11226,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
+ queue_count = min_t(int, queue_count, num_online_cpus());
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
@@ -13901,6 +14058,148 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
}
/**
+ * i40e_set_fec_in_flags - helper function for setting FEC options in flags
+ * @fec_cfg: FEC option to set in flags
+ * @flags: ptr to flags in which we set FEC option
+ **/
+void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+{
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
+ *flags |= I40E_FLAG_RS_FEC;
+ *flags &= ~I40E_FLAG_BASE_R_FEC;
+ }
+ if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
+ (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
+ *flags |= I40E_FLAG_BASE_R_FEC;
+ *flags &= ~I40E_FLAG_RS_FEC;
+ }
+ if (fec_cfg == 0)
+ *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
+}
+
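The flag mapping above is additive only for the AUTO case and exclusive otherwise; walking the branches for a few representative inputs (the caller added later in this patch passes abilities.fec_cfg_curr_mod_ext_info):

	fec_cfg == I40E_AQ_SET_FEC_AUTO        -> RS_FEC and BASE_R_FEC both set
	fec_cfg == I40E_AQ_SET_FEC_REQUEST_RS  -> RS_FEC set, BASE_R_FEC cleared
	fec_cfg == I40E_AQ_SET_FEC_REQUEST_KR  -> BASE_R_FEC set, RS_FEC cleared
	fec_cfg == 0                           -> both flags cleared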
+/**
+ * i40e_check_recovery_mode - check if we are running transition firmware
+ * @pf: board private structure
+ *
+ * Check registers indicating the firmware runs in recovery mode. Sets the
+ * appropriate driver state.
+ *
+ * Returns true if the recovery mode was detected, false otherwise
+ **/
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+{
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ set_bit(__I40E_RECOVERY_MODE, pf->state);
+
+ return true;
+ }
+ if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+
+ return false;
+}
+
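i40e_check_recovery_mode() is consulted both at probe time and during rebuild. Condensed from the i40e_probe() hunks further down in this diff, the probe-time flow is roughly:

	/*
	 * recovery = i40e_check_recovery_mode(pf);   // reads I40E_GL_FWSTS
	 * if (!recovery)
	 *         i40e_pf_reset(hw);                 // skipped in recovery mode
	 * ...adminq init, capabilities, sw init...
	 * if (test_bit(__I40E_RECOVERY_MODE, pf->state))
	 *         return i40e_init_recovery_mode(pf, hw);  // minimal bring-up
	 */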
+/**
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
+ * @pf: board private structure
+ * @hw: ptr to the hardware info
+ *
+ * This function does a minimal setup of all subsystems needed for running
+ * recovery mode.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ struct i40e_vsi *vsi;
+ int err;
+ int v_idx;
+
+ pci_save_state(pf->pdev);
+
+ /* set up periodic task facility */
+ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
+ */
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+ /* We allocate one VSI, which is the absolute minimum needed
+ * in order to register the netdev
+ */
+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+ if (v_idx < 0)
+ goto err_switch_setup;
+ pf->lan_vsi = v_idx;
+ vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_switch_setup;
+ vsi->alloc_queue_pairs = 1;
+ err = i40e_config_netdev(vsi);
+ if (err)
+ goto err_switch_setup;
+ err = register_netdev(vsi->netdev);
+ if (err)
+ goto err_switch_setup;
+ vsi->netdev_registered = true;
+ i40e_dbg_pf_init(pf);
+
+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
+ del_timer_sync(&pf->service_timer);
+ i40e_shutdown_adminq(hw);
+ iounmap(hw->hw_addr);
+ pci_disable_pcie_error_reporting(pf->pdev);
+ pci_release_mem_regions(pf->pdev);
+ pci_disable_device(pf->pdev);
+ kfree(pf);
+
+ return err;
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -13997,6 +14296,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->l3_flex_pit_list);
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+ INIT_LIST_HEAD(&pf->ddp_old_prof);
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -14024,13 +14324,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
+ if (!i40e_check_recovery_mode(pf)) {
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
}
- pf->pfr_count++;
-
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -14055,7 +14356,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
dev_info(&pdev->dev,
- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else
dev_info(&pdev->dev,
"The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
@@ -14064,19 +14369,28 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_oem_version(hw);
- /* provide nvm, fw, api versions */
- dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+ /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- i40e_nvm_version_str(hw));
+ i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
+ hw->subsystem_vendor_id, hw->subsystem_device_id);
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
dev_info(&pdev->dev,
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
i40e_verify_eeprom(pf);
@@ -14085,6 +14399,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
i40e_clear_pxe_mode(hw);
+
err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (err)
goto err_adminq_setup;
@@ -14095,6 +14410,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
}
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ return i40e_init_recovery_mode(pf, hw);
+
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (err) {
@@ -14115,7 +14433,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
- i40e_aq_stop_lldp(hw, true, NULL);
+ i40e_aq_stop_lldp(hw, true, false, NULL);
}
/* allow a platform config to override the HW addr */
@@ -14391,6 +14709,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ /* set the FEC config based on the board capabilities */
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
@@ -14477,6 +14798,19 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ struct i40e_vsi *vsi = pf->vsi[0];
+
+ /* We know that we have allocated only one VSI for this PF;
+ * it was allocated just to register a netdev, so that the
+ * interface is visible in the 'ifconfig' output
+ */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+
+ goto unmap;
+ }
+
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
@@ -14526,6 +14860,12 @@ static void i40e_remove(struct pci_dev *pdev)
ret_code);
}
+unmap:
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* shutdown the adminq */
i40e_shutdown_adminq(hw);
@@ -14538,7 +14878,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
- i40e_vsi_clear_rings(pf->vsi[i]);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
pf->vsi[i] = NULL;
}
@@ -14755,6 +15096,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_WUFC,
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* Since we're going to destroy queues during the
* i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
* whole section
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 937e7f588c2d..cbefe319988b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -601,11 +601,10 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- if (!ret_code) {
- le_sum = cpu_to_le16(checksum);
+ le_sum = cpu_to_le16(checksum);
+ if (!ret_code)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
- }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index a4e8c812e9ff..29b02ec0ec15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -226,14 +226,18 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
@@ -452,10 +456,16 @@ i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 776a97521649..b4ebea47dcd5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -165,12 +165,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
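The switch from timespec64_add_ns() to ns_to_timespec64() plus timespec64_add() matters for negative adjustments: timespec64_add_ns() takes an unsigned u64 nanosecond count, so a negative s64 delta from the PTP core would be mis-applied, whereas the conversion normalizes the sign first. A small illustration with made-up numbers:

	s64 delta = -500000000;                  /* step the clock back 0.5 s */
	struct timespec64 then = ns_to_timespec64(delta);
	/* then is roughly { .tv_sec = -1, .tv_nsec = 500000000 }, i.e. -0.5 s */
	now = timespec64_add(now, then);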
@@ -741,16 +742,68 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ /* Set the previous "reset" time to the current kernel clock time */
+ pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+ pf->ptp_reset_start = ktime_get();
+
return 0;
}
/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+ /* don't try to access the PTP clock if it's not enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ i40e_ptp_gettime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+ /* Get a monotonic starting time for this reset */
+ pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+ ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+ /* Update the previous HW time with the ktime delta */
+ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+ /* Restore the hardware clock registers */
+ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
+
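Taken together with i40e_ptp_save_hw_time() above, the restore path is simple bookkeeping around the reset; a condensed timeline:

	/*
	 * prepare for reset:  ptp_prev_hw_time = <PHC time>;
	 *                     ptp_reset_start  = ktime_get();
	 * ...hardware reset and rebuild...
	 * restore:            delta = ktime_get() - ptp_reset_start;
	 *                     <PHC time> = ptp_prev_hw_time + delta;
	 *
	 * e.g. if the PHC read 100.0 s when the reset began and the rebuild took
	 * about 2.5 s of monotonic time, the clock is rewritten to roughly 102.5 s
	 * instead of being re-seeded from system time.
	 */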
+/**
* i40e_ptp_init - Initialize the 1588 support after device probe or reset
* @pf: Board private structure
*
* This function sets device up for 1588 support. The first time it is run, it
* will create a PHC clock device. It does not create a clock device if one
* already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
@@ -782,7 +835,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else if (pf->ptp_clock) {
- struct timespec64 ts;
u32 regval;
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
@@ -803,9 +855,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* reset timestamping mode */
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
- /* Set the clock value. */
- ts = ktime_to_timespec64(ktime_get_real());
- i40e_ptp_settime(&pf->ptp_caps, &ts);
+ /* Restore the clock time based on last known value */
+ i40e_ptp_restore_hw_time(pf);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index d70dbf08b0f4..5d6c989541df 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -275,6 +275,12 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -639,6 +645,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
u64 flags;
/* Used in set switch config AQ command */
@@ -1550,6 +1557,8 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
char name[I40E_DDP_NAME_SIZE];
};
@@ -1578,15 +1587,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d4171770022..1cbd55f66503 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -204,7 +204,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
* check for the valid queue id
**/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
- u8 qid)
+ u16 qid)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -219,7 +219,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
*
* check for the valid vector id
**/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
struct i40e_pf *pf = vf->pf;
@@ -464,14 +464,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
+ int ret = 0;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ if (qvlist_info->num_vectors > msix_vf) {
+ dev_warn(&pf->pdev->dev,
+ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+ qvlist_info->num_vectors,
+ msix_vf);
+ ret = -EINVAL;
+ goto err_out;
+ }
size = sizeof(struct virtchnl_iwarp_qvlist_info) +
(sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
+ kfree(vf->qvlist_info);
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
- if (!vf->qvlist_info)
- return -ENOMEM;
-
+ if (!vf->qvlist_info) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -482,8 +496,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx))
- goto err;
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
vf->qvlist_info->qv_info[i] = *qv_info;
@@ -525,10 +541,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
}
return 0;
-err:
+err_free:
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
- return -EINVAL;
+err_out:
+ return ret;
}
/**
@@ -1135,15 +1152,6 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (allmulti || alluni)) {
- dev_err(&pf->pdev->dev,
- "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Lie to the VF on purpose. */
- return 0;
- }
-
if (vf->port_vlan_id) {
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
allmulti,
@@ -2020,8 +2028,31 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- return I40E_ERR_PARAM;
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+
+ /* Lie to the VF on purpose, because this is an error we can safely
+ * ignore: an unprivileged VF request is not a virtual channel error.
+ */
+ aq_ret = 0;
+ goto err_out;
+ }
+
+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
/* Multicast promiscuous handling*/
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
@@ -2055,7 +2086,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
}
-
+err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -2077,25 +2108,46 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id = 0;
+ u16 num_qps_all = 0;
i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
- vsi_id = qci->vsi_id;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ if (vf->adq_enabled) {
+ for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ num_qps_all += vf->ch[i].num_qps;
+ if (num_qps_all != qci->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi_id = qci->vsi_id;
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (!vf->adq_enabled) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ qpi->txq.queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
vsi_queue_id = qpi->txq.queue_id;
if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2106,10 +2158,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (vf->adq_enabled)
+ vsi_id = vf->ch[idx].vsi_id;
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
&qpi->rxq) ||
@@ -2133,7 +2183,6 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
j++;
vsi_queue_id++;
}
- vsi_id = vf->ch[idx].vsi_id;
}
}
/* set vsi num_queue_pairs in use to num configured by VF */
@@ -2192,7 +2241,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vector_id;
+ u16 vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2201,16 +2250,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (irqmap_info->num_vectors >
+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
/* validate msg params */
- if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ vsi_id = map->vsi_id;
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
aq_ret = I40E_ERR_PARAM;
@@ -2358,7 +2412,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
+ vqs->rx_queues > I40E_MAX_VF_QUEUES ||
+ vqs->tx_queues > I40E_MAX_VF_QUEUES) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2472,8 +2528,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
@@ -2782,7 +2840,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
- aq_ret = I40E_ERR_PARAM;
+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
+ aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3144,7 +3203,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.dst_port & data.dst_port) {
- if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
+ if (!data.dst_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
vf->vf_id);
goto err;
@@ -3152,7 +3211,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.src_port & data.src_port) {
- if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
+ if (!data.src_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
vf->vf_id);
goto err;
@@ -3392,7 +3451,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (!vf->adq_enabled) {
@@ -3400,15 +3459,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: ADq is not enabled, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (i40e_validate_cloud_filter(vf, vcf)) {
dev_info(&pf->pdev->dev,
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
- goto err;
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3469,13 +3528,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
vf->vf_id, i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto err;
+ goto err_free;
}
INIT_HLIST_NODE(&cfilter->cloud_node);
hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ /* ownership passed to the list; clear the pointer so it is not freed below */
+ cfilter = NULL;
vf->num_cloud_filters++;
-err:
+err_free:
+ kfree(cfilter);
+err_out:
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
aq_ret);
}
@@ -3679,7 +3742,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
int ret;
pf->vf_aq_requests++;
- if (local_vf_id >= pf->num_alloc_vfs)
+ if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
@@ -4027,6 +4090,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool allmulti = false, alluni = false;
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
@@ -4111,6 +4175,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* disable promisc modes in case they were enabled */
+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+ allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -4137,6 +4210,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ alluni = true;
+
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ allmulti = true;
+
/* Schedule the worker thread to take care of applying changes */
i40e_service_event_schedule(vsi->back);
@@ -4149,6 +4228,13 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* default LAN MAC address.
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+ goto error_pvid;
+ }
+
ret = 0;
error_pvid:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index d7bd884ef3d0..9ee753bc4efa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -40,6 +40,8 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
+#define I40E_MAX_VF_PROMISC_FLAGS 3
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 3827f16e6923..037560c0efac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,69 +10,6 @@
#include "i40e_xsk.h"
/**
- * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
- * @vsi: Current VSI
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
-{
- if (vsi->xsk_umems)
- return 0;
-
- vsi->num_xsk_umems_used = 0;
- vsi->num_xsk_umems = vsi->alloc_queue_pairs;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
- GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @umem: UMEM to store
- * @qid: Ring/qid to associate with the UMEM
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
- u16 qid)
-{
- int err;
-
- err = i40e_alloc_xsk_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
- * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
- * @vsi: Current VSI
- * @qid: Ring/qid associated with the UMEM
- **/
-static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
-{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
-
- if (vsi->num_xsk_umems == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
- }
-}
-
-/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
* @umem: UMEM to DMA map
@@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (qid >= vsi->num_queue_pairs)
return -EINVAL;
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- if (vsi->xsk_umems[qid])
- return -EBUSY;
- }
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
if (!reuseq)
@@ -167,19 +102,15 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
err = i40e_queue_pair_disable(vsi, qid);
if (err)
return err;
- }
-
- err = i40e_add_xsk_umem(vsi, umem, qid);
- if (err)
- return err;
- if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -202,11 +133,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
**/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
+ struct xdp_umem *umem;
bool if_running;
int err;
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ umem = xdp_get_umem_from_qid(netdev, qid);
+ if (!umem)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -217,8 +150,8 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
- i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
- i40e_remove_xsk_umem(vsi, qid);
+ clear_bit(qid, vsi->af_xdp_zc_qps);
+ i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -242,20 +175,18 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
u16 qid)
{
+ struct net_device *netdev = vsi->netdev;
+ struct xdp_umem *queried_umem;
+
if (vsi->type != I40E_VSI_MAIN)
return -EINVAL;
- if (qid >= vsi->num_queue_pairs)
- return -EINVAL;
+ queried_umem = xdp_get_umem_from_qid(netdev, qid);
- if (vsi->xsk_umems) {
- if (qid >= vsi->num_xsk_umems)
- return -EINVAL;
- *umem = vsi->xsk_umems[qid];
- return 0;
- }
+ if (!queried_umem)
+ return -EINVAL;
- *umem = NULL;
+ *umem = queried_umem;
return 0;
}
@@ -950,13 +881,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
int i;
- if (!vsi->xsk_umems)
- return false;
-
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->xsk_umems[i])
+ if (xdp_get_umem_from_qid(netdev, i))
return true;
}
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
index af4f94a6541e..e5ae4a1c0cff 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
@@ -14,7 +14,7 @@
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index e5d6f684437e..2d140ba83781 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,3 +17,4 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a385575600f6..792e6e42030e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -26,6 +26,7 @@
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#include <linux/avf/virtchnl.h>
@@ -33,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
+#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
@@ -41,10 +43,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
-#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(struct ice_tx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+
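Assuming 4 KiB pages, the page-fill defaults above work out to concrete numbers: a 32-byte Rx descriptor gives 4096 / 32 = 128 and a 16-byte Tx descriptor gives 4096 / 16 = 256 (descriptor sizes as implied by the structure names); both values are already multiples of ICE_REQ_DESC_MULTIPLE (32) and well under ICE_MAX_NUM_DESC (8160), so the defaults become 128 Rx and 256 Tx descriptors per ring, scaling up with larger page sizes until the 8160 cap.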
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
@@ -70,6 +83,8 @@ extern const char ice_drv_ver[];
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
@@ -82,7 +97,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+ (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
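With the standard Linux header sizes (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4), the new expression reserves 14 + 4 + 8 = 26 bytes of L2 overhead below the maximum frame size, leaving room for two stacked VLAN tags; the old expression also lacked the parentheses, so it effectively subtracted only 14 - 4 - 4 = 6 bytes.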
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -110,6 +125,26 @@ extern const char ice_drv_ver[];
#define ice_for_each_alloc_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+#define ice_for_each_q_vector(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
+#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -119,7 +154,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* TX map */
+ u8 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -129,6 +164,17 @@ struct ice_res_tracker {
u16 list[1];
};
+struct ice_qs_cfg {
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
+ unsigned long *pf_map;
+ unsigned long pf_map_size;
+ unsigned int q_count;
+ unsigned int scatter_count;
+ u16 *vsi_map;
+ u16 vsi_map_offset;
+ u8 mapping_mode;
+};
+
struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
@@ -209,6 +255,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
+ u16 ethtype; /* Ethernet protocol for pause frame */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -232,6 +280,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
+ u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -242,26 +291,34 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
- u16 num_desc;
+ u16 num_rx_desc;
+ u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- struct ice_ring_container rx;
- struct ice_ring_container tx;
- struct irq_affinity_notify affinity_notify;
+
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of Tx rings in vector */
+ u16 reg_idx;
u8 num_ring_rx; /* total number of Rx rings in vector */
- char name[ICE_INT_NAME_STR_LEN];
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
u8 intrl;
+
+ struct napi_struct napi;
+
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -270,6 +327,11 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_DCB_CAPABLE,
+ ICE_FLAG_DCB_ENA,
+ ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -308,8 +370,8 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan Tx queues setup */
- u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 num_lan_tx; /* num LAN Tx queues setup */
+ u16 num_lan_rx; /* num LAN Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -323,6 +385,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+#ifdef CONFIG_DCB
+ u16 dcbx_cap;
+#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -335,14 +400,15 @@ struct ice_netdev_priv {
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
- * @hw: pointer to hw struct
- * @vsi: pointer to vsi struct, can be NULL
+ * @hw: pointer to HW struct
+ * @vsi: pointer to VSI struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
-static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
- struct ice_q_vector *q_vector)
+static inline void
+ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -358,10 +424,24 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
-static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+/**
+ * ice_find_vsi_by_type - Find and return VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ */
+static inline struct ice_vsi *
+ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
- vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
- vsi->tc_cfg.numtc = 1;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
}
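A hedged usage sketch for the new lookup helper (illustrative, not taken from the patch); ICE_VSI_PF is assumed to be one of the enum ice_vsi_type values defined elsewhere in the driver:

	/* Example only: fetch the PF's main VSI before applying a
	 * DCB-driven reconfiguration; bail out if it does not exist yet.
	 */
	struct ice_vsi *pf_vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);

	if (!pf_vsi)
		return -ENOENT;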
void ice_set_ethtool_ops(struct net_device *netdev);
@@ -372,5 +452,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
+#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index fcdcd80b18e7..6ef083002f5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin id of the SDP */
+ /* For SDP: pin ID of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
@@ -657,8 +657,13 @@ struct ice_aqc_get_topo {
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
+ * Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
*/
-struct ice_aqc_get_cfg_elem {
+struct ice_aqc_sched_elem_cmd {
__le16 num_elem_req; /* Used by commands */
__le16 num_elem_resp; /* Used by responses */
__le32 reserved;
@@ -674,18 +679,6 @@ struct ice_aqc_suspend_resume_elem {
__le32 teid[1];
};
-/* Add TSE (indirect 0x0401)
- * Delete TSE (indirect 0x040F)
- * Move TSE (indirect 0x0408)
- */
-struct ice_aqc_add_move_delete_elem {
- __le16 num_grps_req;
- __le16 num_grps_updated;
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -754,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
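For context, a sketch of how the new Query Port ETS descriptor might be filled and sent. This is illustrative only; it assumes the existing ice_fill_dflt_direct_cmd_desc()/ice_aq_send_cmd() helpers, the port_ets union member added to struct ice_aq_desc later in this patch, and a valid root scheduler node on the port_info:

	/* Example only: query port TC node configuration (opcode 0x040E). */
	static enum ice_status
	example_aq_query_port_ets(struct ice_port_info *pi,
				  struct ice_aqc_port_ets_elem *buf, u16 buf_size,
				  struct ice_sq_cd *cd)
	{
		struct ice_aqc_query_port_ets *cmd;
		struct ice_aq_desc desc;

		if (!pi || !pi->root)
			return ICE_ERR_PARAM;

		cmd = &desc.params.port_ets;
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
		cmd->port_teid = pi->root->info.node_teid;

		return ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
	}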
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -854,11 +873,46 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
+#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
+#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
+#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
+#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
+#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
+#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
+#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
+#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
+#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
+#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
+#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
+/* The second set of defines is for phy_type_high. */
+#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
@@ -923,10 +977,11 @@ struct ice_aqc_set_phy_cfg {
/* Set PHY config command data structure */
struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 rsvd0;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
-#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
-#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
@@ -995,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
- /* Port TX Suspended */
+ /* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0
@@ -1032,10 +1087,12 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_20GB BIT(6)
#define ICE_AQ_LINK_SPEED_25GB BIT(7)
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_50GB BIT(9)
+#define ICE_AQ_LINK_SPEED_100GB BIT(10)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
__le32 reserved3; /* Aligns next field to 8-byte boundary */
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 reserved4;
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
};
/* Set event mask command (direct 0x0613) */
@@ -1055,6 +1112,16 @@ struct ice_aqc_set_event_mask {
u8 reserved1[6];
};
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ice_aqc_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
+#define ICE_AQC_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1079,9 +1146,9 @@ struct ice_aqc_nvm {
};
/**
- * Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to PF command (indirect 0x0801) ID is only used by PF
*
- * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to VF command (indirect 0x0802) ID is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
@@ -1091,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
__le32 addr_low;
};
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
+ * and in the LLDP MIB Change Event (0x0A01). They are valid for the
+ * Get LLDP MIB (0x0A00) response only.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
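A minimal sketch of driving the new Stop LLDP descriptor over the admin queue (illustrative, not the in-tree DCB implementation); it assumes the lldp_stop union member added to struct ice_aq_desc below and the existing ice_fill_dflt_direct_cmd_desc()/ice_aq_send_cmd() helpers:

	/* Example only: stop, or fully shut down, the FW LLDP agent (0x0A05). */
	static enum ice_status
	example_aq_stop_lldp(struct ice_hw *hw, bool shutdown, struct ice_sq_cd *cd)
	{
		struct ice_aqc_lldp_stop *cmd;
		struct ice_aq_desc desc;

		cmd = &desc.params.lldp_stop;
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);

		if (shutdown)
			cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;

		return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	}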
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1104,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
struct ice_aqc_get_set_rss_keys {
u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
@@ -1145,7 +1335,7 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
-/* Add TX LAN Queues (indirect 0x0C30) */
+/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
u8 reserved[3];
@@ -1154,7 +1344,7 @@ struct ice_aqc_add_txqs {
__le32 addr_low;
};
-/* This is the descriptor of each queue entry for the Add TX LAN Queues
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
* command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
*/
struct ice_aqc_add_txqs_perq {
@@ -1166,7 +1356,7 @@ struct ice_aqc_add_txqs_perq {
struct ice_aqc_txsched_elem info;
};
-/* The format of the command buffer for Add TX LAN Queues (0x0C30)
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_tx_qgrp is variable due
* to the variable number of queues in each group!
@@ -1178,7 +1368,7 @@ struct ice_aqc_add_tx_qgrp {
struct ice_aqc_add_txqs_perq txqs[1];
};
-/* Disable TX LAN Queues (indirect 0x0C31) */
+/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0
@@ -1200,7 +1390,7 @@ struct ice_aqc_dis_txqs {
__le32 addr_low;
};
-/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
* contains the following structures, arrayed one after the
* other.
* Note: Since the q_id is 16 bits wide, if the
@@ -1341,14 +1531,21 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_get_topo get_topo;
- struct ice_aqc_get_cfg_elem get_update_elem;
+ struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_add_move_delete_elem add_move_delete_elem;
+ struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1381,6 +1578,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@@ -1433,6 +1632,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@@ -1442,6 +1642,7 @@ enum ice_adminq_opc {
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_port_id_led = 0x06E9,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
@@ -1449,6 +1650,14 @@ enum ice_adminq_opc {
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -1456,7 +1665,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
- /* TX queue handling commands/events */
+ /* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4c1d35da940d..da7878529929 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -31,7 +31,7 @@
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
+ * vendor ID and device ID stored in the HW structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
@@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
/**
* ice_aq_manage_mac_read - manage MAC address read command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the manage MAC read response
* @buf_size: Size of the virtual buffer
* @cd: pointer to command details structure or NULL
@@ -165,8 +165,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ }
return status;
}
@@ -183,6 +185,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
hw_link_info = &pi->phy.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
switch (hw_link_info->phy_type_low) {
@@ -196,6 +201,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -209,6 +223,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
case ICE_PHY_TYPE_LOW_25GBASE_CR1:
case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
return ICE_MEDIA_DA;
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
@@ -219,10 +238,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
}
}
-
return ICE_MEDIA_UNKNOWN;
}
@@ -235,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-static enum ice_status
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -274,6 +301,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* update current link status information */
hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
hw_link_info->link_info = link_data.link_info;
hw_link_info->an_info = link_data.an_info;
@@ -303,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return status;
+ return 0;
}
/**
@@ -330,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_FIN, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+ ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+ ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
- ICE_RXFLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
- ICE_RXFLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+ ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+ ICE_FLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+ ICE_FLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
@@ -390,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -410,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -449,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
*
* This function enables/disables the FW logging via Rx CQ events and a UART
@@ -598,7 +626,7 @@ out:
/**
* ice_output_fw_log
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: pointer to the AQ message descriptor
* @buf: pointer to the buffer accompanying the AQ message
*
@@ -614,12 +642,12 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Determines the itr/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
-static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
@@ -636,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
hw->itr_gran = ICE_ITR_GRAN_MAX_25;
hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
break;
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to determine itr/intrl granularity\n");
- return ICE_ERR_CFG;
}
-
- return 0;
}
/**
@@ -669,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- status = ice_get_itr_intrl_gran(hw);
- if (status)
- return status;
+ ice_get_itr_intrl_gran(hw);
status = ice_init_all_ctrlq(hw);
if (status)
@@ -703,7 +723,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
- /* set the back pointer to hw */
+ /* set the back pointer to HW */
hw->port_info->hw = hw;
/* Initialize port_info struct with switch configuration data */
@@ -750,6 +770,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
status = ICE_ERR_CFG;
goto err_unroll_sched;
}
+ INIT_LIST_HEAD(&hw->agg_list);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
@@ -800,6 +821,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
+ ice_sched_clear_agg(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -958,7 +980,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
- * Copies rxq context from dense structure to hw register space
+ * Copies rxq context from dense structure to HW register space
*/
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
@@ -971,7 +993,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
- /* Copy each dword separately to hw */
+ /* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
wr32(hw, QRX_CONTEXT(i, rxq_index),
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
@@ -1015,7 +1037,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to hw register space
+ * it to HW register space
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1070,8 +1092,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
- void *buf, u16 buf_len)
+void
+ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
+ u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
@@ -1113,7 +1136,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
@@ -1130,7 +1153,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/**
* ice_aq_get_fw_ver
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cd: pointer to command details structure or NULL
*
* Get the firmware version (0x0001) from the admin queue commands
@@ -1164,7 +1187,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
/**
* ice_aq_q_shutdown
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
@@ -1187,8 +1210,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
/**
* ice_aq_req_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
@@ -1273,8 +1296,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_aq_release_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
@@ -1300,7 +1323,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
* ice_acquire_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
* @access: access type (read or write)
* @timeout: timeout in milliseconds
*
@@ -1362,7 +1385,7 @@ ice_acquire_res_exit:
/**
* ice_release_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
*
* This function will release a resource using the proper Admin Command.
*/
@@ -1374,7 +1397,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
- * results in an admin Q timeout, so handle them correctly
+ * results in an admin queue timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
@@ -1385,13 +1408,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
- * ice_get_guar_num_vsi - determine number of guar VSI for a PF
- * @hw: pointer to the hw structure
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
- * from parsing capabilities and use this to calculate the number of VSI per PF.
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
*/
-static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1402,12 +1427,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
- return ICE_MAX_VSI / funcs;
+ return max / funcs;
}
/**
* ice_parse_caps - parse function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: pointer to a buffer containing function/device capability records
* @cap_count: number of capability records in the list
* @opc: type of capabilities list to parse
@@ -1482,7 +1507,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
@@ -1548,7 +1574,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
/**
* ice_aq_discover_caps - query function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
* @cap_count: cap count needed if AQ err==ENOMEM
@@ -1587,8 +1613,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
-static enum ice_status ice_discover_caps(struct ice_hw *hw,
- enum ice_adminq_opc opc)
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
@@ -1647,7 +1673,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
/**
* ice_aq_manage_mac_write - manage MAC address write command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
* @flags: flags to control write behavior
* @cd: pointer to command details structure or NULL
@@ -1655,7 +1681,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
* This function is used to write MAC address to the NVM (0x0108).
*/
enum ice_status
-ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
@@ -1667,15 +1693,15 @@ ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
cmd->flags = flags;
/* Prep values for flags, sah, sal */
- cmd->sah = htons(*((u16 *)mac_addr));
- cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
+ cmd->sah = htons(*((const u16 *)mac_addr));
+ cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_clear_pxe_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
@@ -1691,7 +1717,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_clear_pxe_mode - clear pxe operations mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
@@ -1705,16 +1731,20 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
+ * @phy_type_high: higher part of phy_type
*
- * This helper function will convert a phy_type_low to its corresponding link
+ * This helper function will convert an entry in PHY type structure
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
+ * Note: In the structure of [phy_type_low, phy_type_high], there should
+ * be one bit set, as this function will convert one PHY type to its
* speed.
- * Note: In the structure of phy_type_low, there should be one bit set, as
- * this function will convert one phy type to its speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
switch (phy_type_low) {
@@ -1768,46 +1798,115 @@ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
case ICE_PHY_TYPE_LOW_40G_XLAUI:
speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
+ break;
default:
speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
}
- return speed_phy_type_low;
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return speed_phy_type_low;
+ else
+ return speed_phy_type_high;
}
/**
* ice_update_phy_type
* @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
* @link_speeds_bitmap: targeted link spee