Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c509.c  1
-rw-r--r--  drivers/net/ethernet/3com/3c515.c  1
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c  4
-rw-r--r--  drivers/net/ethernet/3com/Kconfig  2
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c  345
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c  1
-rw-r--r--  drivers/net/ethernet/agere/et131x.c  4
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c  4
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c  4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h  19
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c  124
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h  80
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_common_defs.h  2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c  26
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h  7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h  6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c  85
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c  55
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h  17
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h  2
-rw-r--r--  drivers/net/ethernet/amd/7990.c  2
-rw-r--r--  drivers/net/ethernet/amd/7990.h  2
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile  4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h  4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h  40
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c  79
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c  11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h  42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c  26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h  2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_macsec.c  6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c  72
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c  335
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h  27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c  42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c  27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c  19
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c  72
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h  8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c  30
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c  348
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h  44
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h  6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c  83
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h  42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h  101
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c  60
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h  26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c  35
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c  841
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h  14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h  127
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c  234
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h  102
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h  391
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c  131
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h  606
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c  320
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c  6
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c  43
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c  8
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c  7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c  26
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c  21
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  100
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  25
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  261
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h  216
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c  51
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c  10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h  8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c  1
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c  1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  790
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  23
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c  96
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  8
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c  1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h  12
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c  5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  1
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  44
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c  166
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  96
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h  1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c  204
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c  3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  40
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h  10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c  2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig  2
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c  3
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig  4
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c  10
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c  2
-rw-r--r--  drivers/net/ethernet/dnet.c  3
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c  3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c  2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig  10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile  1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c  150
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c  16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  491
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h  85
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c  26
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h  59
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c  177
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h  97
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  34
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h  86
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h  159
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c  50
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c  1103
-rw-r--r--  drivers/net/ethernet/freescale/fec.h  4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  127
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h  25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  166
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h  53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c  88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c  5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h  48
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c  195
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  1710
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  40
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c  79
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h  87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  388
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h  32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h  87
-rw-r--r--  drivers/net/ethernet/huawei/hinic/Makefile  2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h  5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c  538
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c  47
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h  2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h  2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c  205
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h  90
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c  98
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h  7
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.c  78
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.h  26
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c  53
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.h  26
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c  1210
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h  154
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c  17
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h  12
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c  8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h  7
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c  9
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h  6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c  142
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c  207
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h  159
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c  15
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.c  1294
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_sriov.h  109
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c  17
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c  2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c  22
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h  1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c  6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  115
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h  3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c  6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  166
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h  17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h  40
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h  5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c  381
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile  4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h  72
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  76
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c  663
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h  82
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c  117
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  146
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c  180
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c  105
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h  25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c  11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c  23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c  134
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c  1697
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c  840
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h  166
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c  919
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h  41
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c  355
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h  47
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.c  397
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fltr.h  39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h  29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h  128
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  605
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h  14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  733
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c  91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h  4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c  14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_status.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c  96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h  7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c  415
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h  28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c  38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h  4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h  80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c  1221
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h  15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c  380
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h  13
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c  9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  3
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile  2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h  449
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c  9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h  51
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_diag.c  186
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_diag.h  30
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_dump.c  113
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  783
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h  3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.c  9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_mac.h  4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  1041
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c  24
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h  44
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c  157
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h  9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  79
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c  309
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  34
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c  3
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  52
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h  17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c  8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c  3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h  4
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig  9
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile  3
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c  1651
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c  68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c  85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h  54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c  350
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c  368
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h  34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c  646
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h  77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c  327
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c  134
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h  153
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c  101
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h  37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c  113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c  51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c  87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c  88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h  28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c  68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  269
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c  935
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h  39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c  79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  635
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h  62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c  170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c  235
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c  160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c  279
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c  322
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  600
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  427
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c  136
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h  18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c  86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  135
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c  161
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h  24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c  118
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  203
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pd.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c  737
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c  33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c  70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c  113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  142
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h  27
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  612
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  185
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c  220
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c  31
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c  39
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c  305
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c  84
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c  378
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c  621
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h  54
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c  1324
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h  18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h  7
-rw-r--r--  drivers/net/ethernet/micrel/Kconfig  2
-rw-r--r--  drivers/net/ethernet/micrel/Makefile  2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.h  151
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c (renamed from drivers/net/ethernet/micrel/ks8851.c)  698
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c  1393
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_par.c  357
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_spi.c  485
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c  5
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c  12
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c  81
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h  6
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c  2
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c  5
-rw-r--r--  drivers/net/ethernet/mscc/Makefile  2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c  235
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h  3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.c  113
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.h  5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c  30
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c  29
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.c  324
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.h  41
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c  2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_tc.c  6
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c  12
-rw-r--r--  drivers/net/ethernet/neterion/Kconfig  4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c  4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c  4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c  125
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h  27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c  42
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c  35
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c  3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c  5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c  16
-rw-r--r--  drivers/net/ethernet/ni/nixge.c  3
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c  6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c  14
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h  17
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c  20
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h  1089
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c  158
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h  28
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c  7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c  136
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.h  6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c  49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h  16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c  60
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h  10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c  26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c  14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h  49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c  42
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h  15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c  40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h  11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c  43
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c  253
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h  28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c  148
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.h  20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c  49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c  16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h  10
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h  17
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  24
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c  1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c  232
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c  4
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c  5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.h  5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c  3
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c  25
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c  26
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  1001
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  10
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  3
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c  5
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  217
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c  27
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c  25
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h  12
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c  82
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.h  17
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.c  8
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c  7
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  10
-rw-r--r--  drivers/net/ethernet/sfc/nic.h  11
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c  7
-rw-r--r--  drivers/net/ethernet/sfc/rx.c  3
-rw-r--r--  drivers/net/ethernet/sfc/siena.c  8
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig  4
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c  32
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig  13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile  5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c  315
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c  160
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c  146
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c  74
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c  67
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c  5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c  3
-rw-r--r--  drivers/net/ethernet/sun/cassini.c  14
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c  12
-rw-r--r--  drivers/net/ethernet/ti/Kconfig  25
-rw-r--r--  drivers/net/ethernet/ti/Makefile  3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c  36
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c  205
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h  13
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c  626
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.h  29
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c  1086
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.h  74
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  22
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c  25
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c  19
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h  2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c  422
-rw-r--r--  drivers/net/ethernet/ti/cpts.h  27
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c  2
-rw-r--r--  drivers/net/ethernet/ti/k3-cppi-desc-pool.c  4
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c  3
-rw-r--r--  drivers/net/ethernet/ti/tlan.c  2
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c  2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c  2
-rw-r--r--  drivers/net/ethernet/via/Kconfig  1
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c  8
561 files changed, 43744 insertions, 16650 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index b762176a1406..139d0120f511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -85,7 +85,6 @@
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
-#include <linux/vermagic.h>
#include <linux/uaccess.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 90312fcd6319..47b4215bb93b 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -22,7 +22,6 @@
*/
-#include <linux/vermagic.h>
#define DRV_NAME "3c515"
#define CORKSCREW 1
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index a2b7f7ab8170..5984b7033999 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1149,7 +1149,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
print_info = (vortex_debug > 1);
if (print_info)
- pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n");
+ pr_info("See Documentation/networking/device_drivers/3com/vortex.rst\n");
pr_info("%s: 3Com %s %s at %p.\n",
print_name,
@@ -1954,7 +1954,7 @@ vortex_error(struct net_device *dev, int status)
dev->name, tx_status);
if (tx_status == 0x82) {
pr_err("Probably a duplex mismatch. See "
- "Documentation/networking/device_drivers/3com/vortex.txt\n");
+ "Documentation/networking/device_drivers/3com/vortex.rst\n");
}
dump_tx_ring(dev);
}
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 3a6fc99c6f32..7cc259893cb9 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -76,7 +76,7 @@ config VORTEX
"Hurricane" (3c555/3cSOHO) PCI
If you have such a card, say Y here. More specific information is in
- <file:Documentation/networking/device_drivers/3com/vortex.txt> and
+ <file:Documentation/networking/device_drivers/3com/vortex.rst> and
in the comments at the beginning of
<file:drivers/net/ethernet/3com/3c59x.c>.
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 42985a82321a..77d78b4c59c4 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -1,39 +1,43 @@
-/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
-/*
- A Linux device driver for PCI NE2000 clones.
-
- Authors and other copyright holders:
- 1992-2000 by Donald Becker, NE2000 core and various modifications.
- 1995-1998 by Paul Gortmaker, core modifications and PCI support.
- Copyright 1993 assigned to the United States Government as represented
- by the Director, National Security Agency.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Issues remaining:
- People are making PCI ne2000 clones! Oh the horror, the horror...
- Limited full-duplex support.
-*/
+/* A Linux device driver for PCI NE2000 clones.
+ *
+ * Authors and other copyright holders:
+ * 1992-2000 by Donald Becker, NE2000 core and various modifications.
+ * 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ * Copyright 1993 assigned to the United States Government as represented
+ * by the Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ *
+ * The author may be reached as becker@scyld.com, or C/O
+ * Scyld Computing Corporation
+ * 410 Severn Ave., Suite 210
+ * Annapolis MD 21403
+ *
+ * Issues remaining:
+ * People are making PCI NE2000 clones! Oh the horror, the horror...
+ * Limited full-duplex support.
+ */
#define DRV_NAME "ne2k-pci"
+#define DRV_DESCRIPTION "PCI NE2000 clone driver"
+#define DRV_AUTHOR "Donald Becker / Paul Gortmaker"
#define DRV_VERSION "1.03"
#define DRV_RELDATE "9/22/2003"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
+ * These may be modified when a driver module is loaded.
+ */
+
+/* More are supported, limit only on options */
+#define MAX_UNITS 8
-#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
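/* Editorial aside, not part of the patch: arrays such as options[] and
 * full_duplex[] are conventionally exposed as per-unit module parameters
 * with module_param_array(); whether this driver uses exactly the form
 * below is an assumption. A minimal sketch:
 *
 *	module_param_array(options, int, NULL, 0);
 *	module_param_array(full_duplex, int, NULL, 0);
 *
 * which permits per-card settings at load time, e.g.:
 *	modprobe ne2k-pci options=0x20,0 full_duplex=1,0
 */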
@@ -52,7 +56,7 @@ static int options[MAX_UNITS];
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
@@ -60,20 +64,14 @@ static int options[MAX_UNITS];
static u32 ne2k_msg_enable;
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
- " D. Becker/P. Gortmaker\n";
-
#if defined(__powerpc__)
#define inl_le(addr) le32_to_cpu(inl(addr))
#define inw_le(addr) le16_to_cpu(inw(addr))
#endif
-#define PFX DRV_NAME ": "
-
-MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
-MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
module_param_named(msg_enable, ne2k_msg_enable, uint, 0444);
@@ -83,7 +81,8 @@ MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bit
MODULE_PARM_DESC(options, "Bit 5: full duplex");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
-/* Some defines that people can play with if so inclined. */
+/* Some defines that people can play with if so inclined.
+ */
/* Use 32 bit data-movement operations instead of 16 bit. */
#define USE_LONGIO
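/* Editorial aside, not part of the patch: USE_LONGIO selects 32-bit port
 * I/O for bulk data movement. The macro's exact use is not shown in this
 * diff; the intent is a compile-time choice of this shape:
 *
 *	#ifdef USE_LONGIO
 *		insl(ioaddr + NE_DATAPORT, buf, count >> 2);
 *	#else
 *		insw(ioaddr + NE_DATAPORT, buf, count >> 1);
 *	#endif
 */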
@@ -91,14 +90,18 @@ MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
/* Do we implement the read before write bugfix ? */
/* #define NE_RW_BUGFIX */
-/* Flags. We rename an existing ei_status field to store flags! */
-/* Thus only the low 8 bits are usable for non-init-time flags. */
+/* Flags. We rename an existing ei_status field to store flags!
+ * Thus only the low 8 bits are usable for non-init-time flags.
+ */
#define ne2k_flags reg0
+
enum {
- ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
- FORCE_FDX=0x20, /* User override. */
- REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
- STOP_PG_0x60=0x100,
+ /* Chip can do only 16/32-bit xfers. */
+ ONLY_16BIT_IO = 8, ONLY_32BIT_IO = 4,
+ /* User override. */
+ FORCE_FDX = 0x20,
+ REALTEK_FDX = 0x40, HOLTEK_FDX = 0x80,
+ STOP_PG_0x60 = 0x100,
};
enum ne2k_pci_chipsets {
@@ -120,7 +123,7 @@ static struct {
char *name;
int flags;
} pci_clone_list[] = {
- {"RealTek RTL-8029", REALTEK_FDX},
+ {"RealTek RTL-8029(AS)", REALTEK_FDX},
{"Winbond 89C940", 0},
{"Compex RL2000", 0},
{"KTI ET32P2", 0},
@@ -149,13 +152,14 @@ static const struct pci_device_id ne2k_pci_tbl[] = {
{ 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a },
{ 0, }
};
+
MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl);
/* ---- No user-serviceable parts below ---- */
#define NE_BASE (dev->base_addr)
-#define NE_CMD 0x00
+#define NE_CMD 0x00
#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
#define NE_IO_EXTENT 0x20
@@ -168,18 +172,20 @@ static int ne2k_pci_open(struct net_device *dev);
static int ne2k_pci_close(struct net_device *dev);
static void ne2k_pci_reset_8390(struct net_device *dev);
-static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
static void ne2k_pci_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
+ struct sk_buff *skb, int ring_offset);
static void ne2k_pci_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
+ const unsigned char *buf,
+ const int start_page);
static const struct ethtool_ops ne2k_pci_ethtool_ops;
/* There is no room in the standard 8390 structure for extra info we need,
- so we build a meta/outer-wrapper structure.. */
+ * so we build a meta/outer-wrapper structure..
+ */
struct ne2k_pci_card {
struct net_device *dev;
struct pci_dev *pci_dev;
@@ -187,18 +193,17 @@ struct ne2k_pci_card {
-/*
- NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
- buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
- 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
- detected by their SA prefix.
-
- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
- mode results in doubled values, which can be detected and compensated for.
-
- The probe is also responsible for initializing the card and filling
- in the 'dev' and 'ei_status' structures.
-*/
+/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
+ * buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
+ * 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
+ * detected by their SA prefix.
+ *
+ * Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ * mode results in doubled values, which can be detected and compensated for.
+ *
+ * The probe is also responsible for initializing the card and filling
+ * in the 'dev' and 'ei_status' structures.
+ */
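/* Editorial aside, not part of the patch: a hypothetical sketch of the
 * doubled-SAPROM compensation described above. A word-wide card read
 * with the 8390 in byte-wide mode returns every byte twice, so the
 * 0x57,0x57 signature lands at doubled offsets and the pairs can be
 * collapsed:
 */
static void saprom_undouble_sketch(unsigned char *sa_prom)
{
	int i;

	/* bytes 0x0e/0x0f of the PROM appear at 28..31 when doubled */
	if (sa_prom[28] == 0x57 && sa_prom[30] == 0x57)
		for (i = 0; i < 16; i++)
			sa_prom[i] = sa_prom[i * 2];
}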
static const struct net_device_ops ne2k_netdev_ops = {
.ndo_open = ne2k_pci_open,
@@ -208,7 +213,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_rx_mode = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
@@ -227,28 +232,21 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
int flags = pci_clone_list[chip_idx].flags;
struct ei_device *ei_local;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
fnd_cnt++;
- i = pci_enable_device (pdev);
+ i = pci_enable_device(pdev);
if (i)
return i;
- ioaddr = pci_resource_start (pdev, 0);
+ ioaddr = pci_resource_start(pdev, 0);
irq = pdev->irq;
- if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
+ if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
goto err_out;
}
- if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
goto err_out;
@@ -261,14 +259,17 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
/* Do a preliminary verification that we have a 8390. */
{
int regd;
- outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+
+ outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
regd = inb(ioaddr + 0x0d);
outb(0xff, ioaddr + 0x0d);
- outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
- inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
+ /* Clear the counter by reading. */
+ inb(ioaddr + EN0_COUNTER0);
if (inb(ioaddr + EN0_COUNTER0) != 0) {
outb(reg0, ioaddr);
- outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ /* Restore the old values. */
+ outb(regd, ioaddr + 0x0d);
goto err_out_free_res;
}
}
@@ -291,9 +292,9 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
- /* This looks like a horrible timing loop, but it should never take
- more than a few cycles.
- */
+ /* This looks like a horrible timing loop, but it should never
+ * take more than a few cycles.
+ */
while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
/* Limit wait: '2' avoids jiffy roll-over. */
if (jiffies - reset_start_time > 2) {
@@ -301,42 +302,53 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
"Card failure (no reset ack).\n");
goto err_out_free_netdev;
}
-
- outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ /* Ack all intr. */
+ outb(0xff, ioaddr + EN0_ISR);
}
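/* Editorial aside, not part of the patch: the "> 2" test above counts
 * jiffies with wrap-safe unsigned arithmetic; the equivalent spelling
 * using the kernel's helpers from <linux/jiffies.h> would be:
 *
 *	if (time_after(jiffies, reset_start_time + 2))
 *		goto err_out_free_netdev;
 */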
/* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
+ * We must first initialize registers, similar
+ * to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
{
struct {unsigned char value, offset; } program_seq[] = {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
- {0x49, EN0_DCFG}, /* Set word-wide access. */
- {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ /* Select page 0 */
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, E8390_CMD},
+ /* Set word-wide access */
+ {0x49, EN0_DCFG},
+ /* Clear the count regs. */
+ {0x00, EN0_RCNTLO},
+ /* Mask completion IRQ */
{0x00, EN0_RCNTHI},
- {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0x00, EN0_IMR},
{0xFF, EN0_ISR},
- {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ /* 0x20 Set to monitor */
+ {E8390_RXOFF, EN0_RXCR},
+ /* 0x02 and loopback mode */
+ {E8390_TXOFF, EN0_TXCR},
{32, EN0_RCNTLO},
{0x00, EN0_RCNTHI},
- {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ /* DMA starting at 0x0000 */
+ {0x00, EN0_RSARLO},
{0x00, EN0_RSARHI},
{E8390_RREAD+E8390_START, E8390_CMD},
};
for (i = 0; i < ARRAY_SIZE(program_seq); i++)
- outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+ outb(program_seq[i].value,
+ ioaddr + program_seq[i].offset);
}
/* Note: all PCI cards have at least 16 bit access, so we don't have
- to check for 8 bit cards. Most cards permit 32 bit access. */
+ * to check for 8 bit cards. Most cards permit 32 bit access.
+ */
if (flags & ONLY_32BIT_IO) {
for (i = 0; i < 4 ; i++)
((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
} else
- for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ for (i = 0; i < 32 /* sizeof(SA_prom) */; i++)
SA_prom[i] = inb(ioaddr + NE_DATAPORT);
/* We always set the 8390 registers for word mode. */
@@ -356,7 +368,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
ei_status.word16 = 1;
ei_status.ne2k_flags = flags;
if (fnd_cnt < MAX_UNITS) {
- if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
ei_status.ne2k_flags |= FORCE_FDX;
}
@@ -388,16 +400,15 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
return 0;
err_out_free_netdev:
- free_netdev (dev);
+ free_netdev(dev);
err_out_free_res:
- release_region (ioaddr, NE_IO_EXTENT);
+ release_region(ioaddr, NE_IO_EXTENT);
err_out:
pci_disable_device(pdev);
return -ENODEV;
}
-/*
- * Magic incantation sequence for full duplex on the supported cards.
+/* Magic incantation sequence for full duplex on the supported cards.
*/
static inline int set_realtek_fdx(struct net_device *dev)
{
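/* Editorial aside, not part of the patch: the body of set_realtek_fdx()
 * is not shown in this hunk. On RTL8029-class parts the "incantation"
 * amounts to unlocking the configuration registers on 8390 register
 * page 3 and setting the full-duplex bit. The offsets below come from
 * RTL8029 documentation and are an assumption, not taken from this
 * driver:
 *
 *	outb(E8390_NODMA + E8390_PAGE3, ioaddr + NE_CMD);
 *	outb(0xC0, ioaddr + 0x01);	9346CR: unlock config registers
 *	outb(0x40, ioaddr + 0x06);	CONFIG3: force full duplex
 *	outb(0x00, ioaddr + 0x01);	9346CR: lock again
 *	outb(E8390_NODMA + E8390_PAGE0, ioaddr + NE_CMD);
 */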
@@ -431,7 +442,9 @@ static int ne2k_pci_set_fdx(struct net_device *dev)
static int ne2k_pci_open(struct net_device *dev)
{
- int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev);
+ int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED,
+ dev->name, dev);
+
if (ret)
return ret;
@@ -450,7 +463,8 @@ static int ne2k_pci_close(struct net_device *dev)
}
/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
static void ne2k_pci_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
@@ -467,31 +481,34 @@ static void ne2k_pci_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2) {
- netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n");
+ netdev_err(dev, "%s did not complete.\n", __func__);
break;
}
- outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RESET, NE_BASE + EN0_ISR);
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
-static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
long nic_base = dev->base_addr;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
- outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
outb(0, nic_base + EN0_RCNTHI);
outb(0, nic_base + EN0_RSARLO); /* On page boundary */
@@ -499,20 +516,22 @@ static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *
outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ insw(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
} else {
- *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ *(u32 *)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
le16_to_cpus(&hdr->count);
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using outb. */
+ * are porting to a new ethercard, look at the packet driver source for hints.
+ * The NEx000 doesn't share the on-board packet memory -- you have to put
+ * the packet out through the "remote DMA" dataport using outb.
+ */
static void ne2k_pci_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
@@ -520,30 +539,30 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
long nic_base = dev->base_addr;
char *buf = skb->data;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
if (ei_status.ne2k_flags & ONLY_32BIT_IO)
count = (count + 3) & 0xFFFC;
- outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
outb(count & 0xff, nic_base + EN0_RCNTLO);
outb(count >> 8, nic_base + EN0_RCNTHI);
outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- insw(NE_BASE + NE_DATAPORT,buf,count>>1);
- if (count & 0x01) {
+ insw(NE_BASE + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01)
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
- }
} else {
- insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ insl(NE_BASE + NE_DATAPORT, buf, count >> 2);
if (count & 3) {
buf += count & ~3;
if (count & 2) {
@@ -556,30 +575,32 @@ static void ne2k_pci_block_input(struct net_device *dev, int count,
*buf = inb(NE_BASE + NE_DATAPORT);
}
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
static void ne2k_pci_block_output(struct net_device *dev, int count,
- const unsigned char *buf, const int start_page)
+ const unsigned char *buf, const int start_page)
{
long nic_base = NE_BASE;
unsigned long dma_start;
/* On little-endian it's always safe to round the count up for
- word writes. */
+ * word writes.
+ */
if (ei_status.ne2k_flags & ONLY_32BIT_IO)
count = (count + 3) & 0xFFFC;
else
if (count & 0x01)
count++;
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
if (ei_status.dmaing) {
- netdev_err(dev, "DMAing conflict in ne2k_pci_block_output."
- "[DMAstat:%d][irqlock:%d]\n",
- ei_status.dmaing, ei_status.irqlock);
+ netdev_err(dev, "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -588,9 +609,10 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
#ifdef NE8390_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work.
- Actually this doesn't always work either, but if you have
- problems with your NEx000 this is better than nothing! */
+ * Crynwr packet driver -- the NatSemi method doesn't work.
+ * Actually this doesn't always work either, but if you have
+ * problems with your NEx000 this is better than nothing!
+ */
outb(0x42, nic_base + EN0_RCNTLO);
outb(0x00, nic_base + EN0_RCNTHI);
outb(0x42, nic_base + EN0_RSARLO);
@@ -599,16 +621,16 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
#endif
outb(ENISR_RDC, nic_base + EN0_ISR);
- /* Now the normal output. */
+ /* Now the normal output. */
outb(count & 0xff, nic_base + EN0_RCNTLO);
outb(count >> 8, nic_base + EN0_RCNTHI);
outb(0x00, nic_base + EN0_RSARLO);
outb(start_page, nic_base + EN0_RSARHI);
outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
- outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ outsw(NE_BASE + NE_DATAPORT, buf, count >> 1);
} else {
- outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ outsl(NE_BASE + NE_DATAPORT, buf, count >> 2);
if (count & 3) {
buf += count & ~3;
if (count & 2) {
@@ -623,14 +645,15 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
- if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ /* Avoid clock roll-over. */
+ if (jiffies - dma_start > 2) {
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ne2k_pci_reset_8390(dev);
- NS8390_init(dev,1);
+ NS8390_init(dev, 1);
break;
}
-
- outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ /* Ack intr. */
+ outb(ENISR_RDC, nic_base + EN0_ISR);
ei_status.dmaing &= ~0x01;
}
@@ -640,9 +663,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static u32 ne2k_pci_get_msglevel(struct net_device *dev)
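The strlcpy() to strscpy() conversion above follows the tree-wide move away from strlcpy(), whose return value is strlen(src) and therefore can read past the destination size even when the copy truncates. A hedged sketch of the semantic difference (buffer contents illustrative, kernel context assumed):

	/* strscpy() returns the number of characters copied, or -E2BIG on
	 * truncation; strlcpy() returns strlen(src), so truncation has to
	 * be detected by comparing the result against the buffer size.
	 */
	char drv[8];
	ssize_t n;

	n = strscpy(drv, "ne2k-pci", sizeof(drv)); /* 8 chars + NUL don't fit */
	if (n == -E2BIG)
		;	/* drv now holds the truncated, NUL-terminated "ne2k-pc" */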
@@ -677,9 +700,9 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
-static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
netif_device_detach(dev);
pci_save_state(pdev);
@@ -689,9 +712,9 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int ne2k_pci_resume (struct pci_dev *pdev)
+static int ne2k_pci_resume(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
int rc;
pci_set_power_state(pdev, PCI_D0);
@@ -718,24 +741,20 @@ static struct pci_driver ne2k_driver = {
#ifdef CONFIG_PM
.suspend = ne2k_pci_suspend,
.resume = ne2k_pci_resume,
-#endif /* CONFIG_PM */
+#endif
};
static int __init ne2k_pci_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
return pci_register_driver(&ne2k_driver);
}
static void __exit ne2k_pci_cleanup(void)
{
- pci_unregister_driver (&ne2k_driver);
+ pci_unregister_driver(&ne2k_driver);
}
module_init(ne2k_pci_init);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 2db42211329f..a64191fc2af9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -45,7 +45,6 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <linux/vermagic.h>
/*
* The current frame processor firmware fails to checksum a fragment
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 1b19385ad8a9..865892c1f23f 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -714,11 +714,11 @@ static int et131x_init_eeprom(struct et131x_adapter *adapter)
* gather additional information that normally would
* come from the eeprom, like MAC Address
*/
- adapter->has_eeprom = 0;
+ adapter->has_eeprom = false;
return -EIO;
}
}
- adapter->has_eeprom = 1;
+ adapter->has_eeprom = true;
/* Read the EEPROM for information regarding LED behavior. Refer to
* et131x_xcvr_init() for its use.
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 18d3b4340bd4..b3b8a8010142 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -417,7 +417,7 @@ static void emac_timeout(struct net_device *dev, unsigned int txqueue)
/* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
unsigned long channel;
@@ -425,7 +425,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
channel = db->tx_fifo_stat & 3;
if (channel == 3)
- return 1;
+ return NETDEV_TX_BUSY;
channel = (channel == 1 ? 1 : 0);
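Both this sun4i-emac change and the altera_tse one below convert ndo_start_xmit to the netdev_tx_t return type, so the magic 1 becomes the self-documenting NETDEV_TX_BUSY. A minimal sketch of the expected shape (the foo_* helpers are hypothetical):

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (foo_tx_ring_full(dev))	/* hypothetical helper */
			return NETDEV_TX_BUSY;	/* core requeues the skb */

		foo_queue_to_hw(dev, skb);	/* hypothetical helper */
		return NETDEV_TX_OK;		/* skb is consumed */
	}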
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1671c1f36691..907125abef2c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -554,7 +554,7 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
* physically contiguous fragment starting at
* skb->data, for length of skb_headlen(skb).
*/
-static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
unsigned int txsize = priv->tx_ring_size;
@@ -562,7 +562,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int nopaged_len = skb_headlen(skb);
- enum netdev_tx ret = NETDEV_TX_OK;
+ netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
spin_lock_bh(&priv->tx_lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 8baf847e8622..336742f6e3c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -404,6 +404,10 @@ struct ena_admin_basic_stats {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_acq_get_stats_resp {
@@ -764,8 +768,8 @@ enum ena_admin_os_type {
ENA_ADMIN_OS_DPDK = 3,
ENA_ADMIN_OS_FREEBSD = 4,
ENA_ADMIN_OS_IPXE = 5,
- ENA_ADMIN_OS_ESXI = 6,
- ENA_ADMIN_OS_GROUPS_NUM = 6,
+ ENA_ADMIN_OS_ESXI = 6,
+ ENA_ADMIN_OS_GROUPS_NUM = 6,
};
struct ena_admin_host_info {
@@ -809,7 +813,8 @@ struct ena_admin_host_info {
u16 reserved;
- /* 1 :0 : reserved
+ /* 0 : reserved
+ * 1 : rx_offset
* 2 : interrupt_moderation
* 31:3 : reserved
*/
@@ -1017,6 +1022,10 @@ struct ena_admin_aenq_keep_alive_desc {
u32 rx_drops_low;
u32 rx_drops_high;
+
+ u32 tx_drops_low;
+
+ u32 tx_drops_high;
};
struct ena_admin_ena_mmio_req_read_less_resp {
@@ -1116,6 +1125,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
@@ -1125,4 +1136,4 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* aenq_link_change_desc */
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
-#endif /*_ENA_ADMIN_H_ */
+#endif /* _ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index a250046b8e18..432f143559a1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -62,7 +62,9 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
-#define ENA_POLL_MS 5
+#define ENA_MIN_ADMIN_POLL_US 100
+
+#define ENA_MAX_ADMIN_POLL_US 5000
/*****************************************************************************/
/*****************************************************************************/
@@ -200,17 +202,17 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
- if (unlikely(!queue->comp_ctx)) {
- pr_err("Completion context is NULL\n");
- return NULL;
- }
-
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
return NULL;
}
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
pr_err("Completion context is occupied\n");
return NULL;
@@ -375,7 +377,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
- io_sq->bounce_buf_ctrl.buffers_num;
+ io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
@@ -523,9 +525,6 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
if (unlikely(comp_status != 0))
pr_err("admin command failed[%u]\n", comp_status);
- if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
- return -EINVAL;
-
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
return 0;
@@ -540,7 +539,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return -EINVAL;
}
- return 0;
+ return -EINVAL;
+}
+
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+ delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
+ delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+ usleep_range(delay_us, 2 * delay_us);
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
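The new ena_delay_exponential_backoff_us() replaces the fixed 5 ms msleep() cadence: the delay is clamped to at least ENA_MIN_ADMIN_POLL_US, doubled on every retry, and capped at ENA_MAX_ADMIN_POLL_US, so with the driver default of 100 us the sequence runs 100, 200, 400, 800, 1600, 3200, 5000, 5000, ... us. A userspace sketch of the same math (usleep() standing in for usleep_range()):

	#include <stdint.h>
	#include <unistd.h>

	#define MIN_POLL_US  100U
	#define MAX_POLL_US 5000U

	static void backoff_us(uint32_t exp, uint32_t delay_us)
	{
		uint32_t i;

		if (delay_us < MIN_POLL_US)
			delay_us = MIN_POLL_US;
		for (i = 0; i < exp && delay_us < MAX_POLL_US; i++)
			delay_us *= 2;		/* double per retry, no shift overflow */
		if (delay_us > MAX_POLL_US)
			delay_us = MAX_POLL_US;
		usleep(delay_us);	/* kernel: usleep_range(delay_us, 2 * delay_us) */
	}

Fast admin commands now complete after a sub-millisecond poll instead of waiting out a full 5 ms tick, while slow ones still back off.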
@@ -549,6 +555,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
unsigned long flags = 0;
unsigned long timeout;
int ret;
+ u32 exp = 0;
timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -572,7 +579,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- msleep(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++,
+ admin_queue->ena_dev->ena_min_poll_delay_us);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -702,8 +710,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be a whole multiple of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n",
- llq_info->desc_list_entry_size);
+ pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -775,7 +782,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
- pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
+ pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
@@ -943,12 +950,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
u16 exp_state)
{
- u32 val, i;
+ u32 val, exp = 0;
+ unsigned long timeout_stamp;
- /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
- timeout = (timeout * 100) / ENA_POLL_MS;
+ /* Convert timeout from resolution of 100ms to us resolution. */
+ timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
- for (i = 0; i < timeout; i++) {
+ while (1) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
@@ -960,10 +968,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
exp_state)
return 0;
- msleep(ENA_POLL_MS);
- }
+ if (time_is_before_jiffies(timeout_stamp))
+ return -ETIME;
- return -ETIME;
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ }
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
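wait_for_reset_state() previously counted loop iterations in ENA_POLL_MS steps; it now derives an absolute jiffies deadline from the device timeout, which firmware reports in units of 100 ms (hence the 100 * 1000 multiplier to reach microseconds). Worked example, assuming HZ=250 (4 ms per jiffy): a timeout value of 5 gives 5 * 100 * 1000 = 500,000 us, so usecs_to_jiffies() yields a 125-jiffy deadline, and the loop backs off exponentially until either the state matches or time_is_before_jiffies(timeout_stamp) trips the -ETIME return.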
@@ -1067,16 +1076,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_get_feat_resp get_resp;
- int rc;
- rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_HASH_FUNCTION,
- ena_dev->rss.hash_key_dma_addr,
- sizeof(ena_dev->rss.hash_key), 0);
- if (unlikely(rc)) {
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1290,13 +1293,9 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
- /* Initial value of intr_delay_resolution might be 0 */
- u16 prev_intr_delay_resolution =
- ena_dev->intr_delay_resolution ?
- ena_dev->intr_delay_resolution :
- ENA_DEFAULT_INTR_DELAY_RESOLUTION;
+ u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
- if (!intr_delay_resolution) {
+ if (unlikely(!intr_delay_resolution)) {
pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@@ -1450,11 +1449,13 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
unsigned long flags = 0;
+ u32 exp = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- msleep(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++,
+ ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1802,6 +1803,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
+ admin_queue->ena_dev = ena_dev;
admin_queue->running_state = true;
return 0;
@@ -2009,7 +2011,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
- unsigned long long timestamp;
+ u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -2027,9 +2029,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
- timestamp =
- (unsigned long long)aenq_common->timestamp_low |
- ((unsigned long long)aenq_common->timestamp_high << 32);
+ timestamp = (u64)aenq_common->timestamp_low |
+ ((u64)aenq_common->timestamp_high << 32);
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrom, timestamp);
@@ -2059,8 +2060,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
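The AENQ timestamp here, like the keep-alive rx/tx drop counters later in this series, arrives as two 32-bit words and is stitched into a u64 the same way. A self-contained sketch:

	#include <stdint.h>

	/* Assemble a 64-bit device counter from its low/high halves, as
	 * done for aenq_common->timestamp_{low,high} above. */
	static inline uint64_t u64_from_halves(uint32_t lo, uint32_t hi)
	{
		return (uint64_t)lo | ((uint64_t)hi << 32);
	}
	/* u64_from_halves(0x00000001, 0x00000002) == 0x0000000200000001 */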
@@ -2282,12 +2282,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
enum ena_admin_hash_functions func,
const u8 *key, u16 key_len, u32 init_val)
{
- struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
+ enum ena_admin_hash_functions old_func;
+ struct ena_rss *rss = &ena_dev->rss;
int rc;
+ hash_key = rss->hash_key;
+
/* Make sure the key size is a multiple of DWORDs */
if (unlikely(key_len & 0x3))
return -EINVAL;
@@ -2299,7 +2301,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
pr_err("Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
@@ -2325,26 +2327,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
if (unlikely(rc))
- ena_com_get_hash_function(ena_dev, NULL, NULL);
+ rss->hash_func = old_func;
return rc;
}
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key)
+ enum ena_admin_hash_functions *func)
{
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
int rc;
+ if (unlikely(!func))
+ return -EINVAL;
+
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
@@ -2357,8 +2360,15 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (rss->hash_func)
rss->hash_func--;
- if (func)
- *func = rss->hash_func;
+ *func = rss->hash_func;
+
+ return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ ena_dev->rss.hash_key;
if (key)
memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
@@ -2641,10 +2651,10 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
* ignore this error and have indirection table support only.
*/
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc) && rc != -EOPNOTSUPP)
- goto err_hash_key;
- else if (rc != -EOPNOTSUPP)
+ if (likely(!rc))
ena_com_hash_key_fill_default_key(ena_dev);
+ else if (rc != -EOPNOTSUPP)
+ goto err_hash_key;
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 469f298199a7..bc187adf54e4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -54,9 +54,9 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define ENA_MAX_NUM_IO_QUEUES 128U
+#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
-#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
#define ENA_MAX_HANDLERS 256
@@ -73,13 +73,15 @@
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
-#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
-#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+#define ENA_HASH_KEY_SIZE 40
-#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
struct ena_llq_configurations {
enum ena_admin_llq_header_location llq_header_location;
@@ -237,6 +239,7 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
+ struct ena_com_dev *ena_dev;
spinlock_t q_lock; /* spinlock for the admin queue */
struct ena_comp_ctx *comp_ctx;
@@ -349,6 +352,8 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+
+ u32 ena_min_poll_delay_us;
};
struct ena_com_dev_get_features_ctx {
@@ -393,7 +398,7 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
-/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
*/
@@ -501,18 +506,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
- * @ena_dev: ENA communication layer struct
- *
- * Get the admin completion mode.
- * If polling mode is on, ena_com_execute_admin_command will perform a
- * polling on the admin completion queue for the commands completion,
- * otherwise it will wait on wait event.
- *
- * @return state
- */
-bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
-
/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
* @ena_dev: ENA communication layer struct
* @polling: Enable/Disable polling mode
@@ -527,7 +520,7 @@ void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the admin completion queue and wake up all the pending
+ * This method goes over the admin completion queue and wakes up all the pending
* threads that wait on the commands wait event.
*
* @note: Should be called after MSI-X interrupt.
@@ -537,7 +530,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
/* ena_com_aenq_intr_handler - AENQ interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the async event notification queue and call the proper
+ * This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
@@ -554,14 +547,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
* @ena_dev: ENA communication layer struct
*
- * This method wait until all the outstanding admin commands will be completed.
+ * This method waits until all the outstanding admin commands are completed.
*/
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
/* ena_com_validate_version - Validate the device parameters
* @ena_dev: ENA communication layer struct
*
- * This method validate the device parameters are the same as the saved
+ * This method verifies the device parameters are the same as the saved
* parameters in ena_dev.
* This method is useful after device reset, to validate the device mac address
* and the device offloads are the same as before the reset.
@@ -695,23 +688,32 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
*/
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
-/* ena_com_get_hash_function - Retrieve the hash function and the hash key
- * from the device.
+/* ena_com_get_hash_function - Retrieve the hash function from the device.
* @ena_dev: ENA communication layer struct
* @func: hash function
- * @key: hash key
*
- * Retrieve the hash function and the hash key from the device.
+ * Retrieve the hash function from the device.
*
- * @note: If the caller called ena_com_fill_hash_function but didn't flash
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key);
+ enum ena_admin_hash_functions *func);
+/* ena_com_get_hash_key - Retrieve the hash key
+ * @ena_dev: ENA communication layer struct
+ * @key: hash key
+ *
+ * Retrieve the hash key.
+ *
+ * @note: If the caller called ena_com_fill_hash_key but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
* @ena_dev: ENA communication layer struct.
* @proto: The protocol to configure.
@@ -746,7 +748,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
*
* Retrieve the hash control from the device.
*
- * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -798,7 +800,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -824,14 +826,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate debug area.
+ * Free the allocated debug area.
*/
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
/* ena_com_delete_host_info - Free the host info resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate host info.
+ * Free the allocated host info.
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
@@ -872,9 +874,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
* @cmd_completion: command completion return value.
* @cmd_comp_size: command completion size.
- * Submit an admin command and then wait until the device will return a
+ * Submit an admin command and then wait until the device returns a
* completion.
- * The completion will be copyed into cmd_comp.
+ * The completion will be copied into cmd_comp.
*
* @return - 0 on success, negative value on failure.
*/
@@ -937,7 +939,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *
/* ena_com_config_dev_mode - Configure the placement policy of the device.
* @ena_dev: ENA communication layer struct
* @llq_features: LLQ feature descriptor, retrieve via
- * ena_com_get_dev_attr_feat.
+ * ena_com_get_dev_attr_feat.
* @ena_llq_config: The default driver LLQ parameters configurations
*/
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
@@ -963,7 +965,7 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
* @tx_delay_interval: Tx interval in usecs
- * @unmask: unask enable/disable
+ * @unmask: unmask enable/disable
*
* Prepare interrupt update register with the supplied parameters.
*/
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index 23beb7e7ed7b..8a8ded0de9ac 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -45,4 +45,4 @@ struct ena_common_mem_addr {
u16 reserved16;
};
-#endif /*_ENA_COMMON_H_ */
+#endif /* _ENA_COMMON_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2845ac277724..ec8ea25e988d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -519,7 +519,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
- u16 i;
+ u16 i = 0;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
@@ -538,13 +538,19 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return -ENOSPC;
}
- for (i = 0; i < nb_hw_desc; i++) {
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
+ ena_rx_ctx->pkt_offset = cdesc->offset;
+
+ do {
+ ena_buf[i].len = cdesc->length;
+ ena_buf[i].req_id = cdesc->req_id;
+
+ if (++i >= nb_hw_desc)
+ break;
+
cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
- ena_buf->len = cdesc->length;
- ena_buf->req_id = cdesc->req_id;
- ena_buf++;
- }
+ } while (1);
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
@@ -578,10 +584,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
- desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
- desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
desc->req_id = req_id;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 77986c0ea52c..8b1afd3b32f2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -73,6 +73,7 @@ struct ena_com_rx_ctx {
u32 hash;
u16 descs;
int max_bufs;
+ u8 pkt_offset;
};
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
@@ -95,7 +96,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -113,7 +114,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
int temp;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return ena_com_free_desc(io_sq) >= required_buffers;
+ return ena_com_free_q_entries(io_sq) >= required_buffers;
/* This calculation doesn't need to be 100% accurate, so to reduce
* the calculation overhead just subtract 2 lines from the free descs
@@ -122,7 +123,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
*/
temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
- return ena_com_free_desc(io_sq) > temp;
+ return ena_com_free_q_entries(io_sq) > temp;
}
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
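ena_com_free_desc() becomes ena_com_free_q_entries() because in LLQ mode one queue entry can carry several descriptors, so "descriptors" was the wrong word. The count itself (its body is not shown in this hunk) relies on free-running u16 producer/consumer indices whose unsigned wrap-around keeps the in-flight count correct across overflow; a hedged sketch under those assumptions:

	#include <stdint.h>

	/* Sketch of the free-entry computation, assuming free-running
	 * 16-bit indices and one entry held in reserve, per the
	 * surrounding ena_eth_com.h code. */
	static inline int free_q_entries(uint16_t tail, uint16_t next_to_comp,
					 uint16_t q_depth)
	{
		uint16_t cnt = tail - next_to_comp;	/* wraps correctly */

		return q_depth - 1 - cnt;
	}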
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index 00e0f056a741..d105c9c56192 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -264,7 +264,9 @@ struct ena_eth_io_rx_cdesc_base {
u16 sub_qid;
- u16 reserved;
+ u8 offset;
+
+ u8 reserved;
};
/* 8-word format */
@@ -412,4 +414,4 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
-#endif /*_ENA_ETH_IO_H_ */
+#endif /* _ENA_ETH_IO_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 9cc28b4b2627..e340b65af08c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -83,6 +83,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(bad_req_id),
ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
+ ENA_STAT_TX_ENTRY(unmask_interrupt),
};
static const struct ena_stats ena_stats_rx_strings[] = {
@@ -205,7 +206,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -259,7 +260,6 @@ static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
-
memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
@@ -306,10 +306,8 @@ static int ena_get_coalesce(struct net_device *net_dev,
struct ena_adapter *adapter = netdev_priv(net_dev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
- if (!ena_com_interrupt_moderation_supported(ena_dev)) {
- /* the devie doesn't support interrupt moderation */
+ if (!ena_com_interrupt_moderation_supported(ena_dev))
return -EOPNOTSUPP;
- }
coalesce->tx_coalesce_usecs =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
@@ -325,7 +323,7 @@ static int ena_get_coalesce(struct net_device *net_dev,
return 0;
}
-static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
unsigned int val;
int i;
@@ -336,7 +334,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
adapter->tx_ring[i].smoothed_interval = val;
}
-static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
+static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
unsigned int val;
int i;
@@ -354,24 +352,22 @@ static int ena_set_coalesce(struct net_device *net_dev,
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc;
- if (!ena_com_interrupt_moderation_supported(ena_dev)) {
- /* the devie doesn't support interrupt moderation */
+ if (!ena_com_interrupt_moderation_supported(ena_dev))
return -EOPNOTSUPP;
- }
rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
coalesce->tx_coalesce_usecs);
if (rc)
return rc;
- ena_update_tx_rings_intr_moderation(adapter);
+ ena_update_tx_rings_nonadaptive_intr_moderation(adapter);
rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
coalesce->rx_coalesce_usecs);
if (rc)
return rc;
- ena_update_rx_rings_intr_moderation(adapter);
+ ena_update_rx_rings_nonadaptive_intr_moderation(adapter);
if (coalesce->use_adaptive_rx_coalesce &&
!ena_com_get_adaptive_moderation_enabled(ena_dev))
@@ -635,6 +631,32 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_set(struct ena_adapter *adapter,
+ const u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot fill indirect table (index is too large)\n");
+ return rc;
+ }
+ }
+
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (rc) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set indirect table\n");
+ return rc == -EPERM ? -EOPNOTSUPP : rc;
+ }
+ return rc;
+}
+
static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
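ena_indirection_table_set() is factored out of ena_set_rxfh() so the fill-then-commit sequence lives in one place; note the rc == -EPERM ? -EOPNOTSUPP : rc translation, which makes ethtool report "not supported" rather than "permission denied" on devices that reject the command. A hypothetical caller programming a round-robin table:

	/* Illustrative only: ena_rss_round_robin() is not part of this patch. */
	static int ena_rss_round_robin(struct ena_adapter *adapter)
	{
		u32 indir[ENA_RX_RSS_TABLE_SIZE];
		int i;

		for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
			indir[i] = i % adapter->num_io_queues;

		return ena_indirection_table_set(adapter, indir);
	}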
@@ -672,17 +694,18 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/* We call this function to check if the device
* supports getting/setting the hash function.
*/
- rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
if (rc) {
- if (rc == -EOPNOTSUPP) {
- key = NULL;
- hfunc = NULL;
+ if (rc == -EOPNOTSUPP)
rc = 0;
- }
return rc;
}
+ rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ if (rc)
+ return rc;
+
switch (ena_func) {
case ENA_ADMIN_TOEPLITZ:
func = ETH_RSS_HASH_TOP;
@@ -699,7 +722,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = func;
- return rc;
+ return 0;
}
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -707,27 +730,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
- enum ena_admin_hash_functions func;
- int rc, i;
+ enum ena_admin_hash_functions func = 0;
+ int rc;
if (indir) {
- for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- rc = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- ENA_IO_RXQ_IDX(indir[i]));
- if (unlikely(rc)) {
- netif_err(adapter, drv, netdev,
- "Cannot fill indirect table (index is too large)\n");
- return rc;
- }
- }
-
- rc = ena_com_indirect_table_set(ena_dev);
- if (rc) {
- netif_err(adapter, drv, netdev,
- "Cannot set indirect table\n");
- return rc == -EPERM ? -EOPNOTSUPP : rc;
- }
+ rc = ena_indirection_table_set(adapter, indir);
+ if (rc)
+ return rc;
}
switch (hfunc) {
@@ -746,7 +755,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
}
- if (key) {
+ if (key || func) {
rc = ena_com_fill_hash_function(ena_dev, func, key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 2cc765df8da3..a0af74c93971 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -263,7 +263,7 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
dma_addr_t dma = 0;
u32 size;
- tx_info->xdpf = convert_to_xdp_frame(xdp);
+ tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
size = tx_info->xdpf->len;
ena_buf = tx_info->bufs;
@@ -1435,6 +1435,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, ENA_PAGE_SIZE);
+ /* The offset is non-zero only for the first buffer */
+ rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
@@ -1590,6 +1592,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
{
u16 next_to_clean = rx_ring->next_to_clean;
struct ena_com_rx_ctx ena_rx_ctx;
+ struct ena_rx_buffer *rx_info;
struct ena_adapter *adapter;
u32 res_budget, work_done;
int rx_copybreak_pkt = 0;
@@ -1606,6 +1609,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
xdp.rxq = &rx_ring->xdp_rxq;
+ xdp.frame_sz = ENA_PAGE_SIZE;
do {
xdp_verdict = XDP_PASS;
@@ -1613,6 +1617,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
ena_rx_ctx.descs = 0;
+ ena_rx_ctx.pkt_offset = 0;
rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
rx_ring->ena_com_io_sq,
&ena_rx_ctx);
@@ -1622,6 +1627,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
if (unlikely(ena_rx_ctx.descs == 0))
break;
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ rx_info->page_offset = ena_rx_ctx.pkt_offset;
+
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
@@ -1683,7 +1691,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
refill_threshold =
min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
ENA_RX_REFILL_THRESH_PACKET);
@@ -1762,6 +1770,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring->smoothed_interval,
true);
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.unmask_interrupt++;
+ u64_stats_update_end(&tx_ring->syncp);
/* It is a shared MSI-X.
* Both the Tx and Rx CQs have a pointer to it,
* so we use one of them to reach the intr reg.
@@ -2231,7 +2242,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2304,7 +2315,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
if (rc) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to create I/O TX queue num %d rc: %d\n",
- qid, rc);
+ qid, rc);
return rc;
}
@@ -2453,7 +2464,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter)
* ones due to past queue allocation failures.
*/
set_io_rings_size(adapter, adapter->requested_tx_ring_size,
- adapter->requested_rx_ring_size);
+ adapter->requested_rx_ring_size);
while (1) {
if (ena_xdp_present(adapter)) {
@@ -2494,7 +2505,7 @@ err_setup_tx:
if (rc != -ENOMEM) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with error code %d\n",
- rc);
+ rc);
return rc;
}
@@ -2517,7 +2528,7 @@ err_setup_tx:
new_rx_ring_size = cur_rx_ring_size / 2;
if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
- new_rx_ring_size < ENA_MIN_RING_SIZE) {
+ new_rx_ring_size < ENA_MIN_RING_SIZE) {
netif_err(adapter, ifup, adapter->netdev,
"Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
ENA_MIN_RING_SIZE);
@@ -3076,8 +3087,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
return qid;
}
-static void ena_config_host_info(struct ena_com_dev *ena_dev,
- struct pci_dev *pdev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -3107,6 +3117,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
host_info->num_cpus = num_online_cpus();
host_info->driver_supported_features =
+ ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
rc = ena_com_set_host_attributes(ena_dev);
@@ -3169,6 +3180,7 @@ static void ena_get_stats64(struct net_device *netdev,
struct ena_ring *rx_ring, *tx_ring;
unsigned int start;
u64 rx_drops;
+ u64 tx_drops;
int i;
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
@@ -3203,9 +3215,11 @@ static void ena_get_stats64(struct net_device *netdev,
do {
start = u64_stats_fetch_begin_irq(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
+ tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
stats->rx_dropped = rx_drops;
+ stats->tx_dropped = tx_drops;
stats->multicast = 0;
stats->collisions = 0;
@@ -3433,6 +3447,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_mmio_reg_read_request_destroy(ena_dev);
+ /* Return the reset reason to its default value */
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
@@ -3678,8 +3693,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
- refill_required =
- ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
@@ -3817,11 +3831,11 @@ static void ena_timer_service(struct timer_list *t)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
-static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
+ u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
@@ -3991,7 +4005,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
@@ -4107,8 +4121,8 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4152,6 +4166,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_region;
}
+ ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
+
ena_dev->dmadev = &pdev->dev;
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
@@ -4175,7 +4191,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
calc_queue_ctx.pdev = pdev;
- /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
+ /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
@@ -4219,12 +4235,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
+ adapter->last_monitored_tx_qid = 0;
adapter->xdp_first_ring = 0;
adapter->xdp_num_queues = 0;
- adapter->last_monitored_tx_qid = 0;
-
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
adapter->wd_state = wd_state;
@@ -4356,6 +4371,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
+ adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
if (shutdown) {
netif_device_detach(netdev);
@@ -4514,14 +4530,17 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_aenq_keep_alive_desc *desc;
u64 rx_drops;
+ u64 tx_drops;
desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies;
rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+ tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.rx_drops = rx_drops;
+ adapter->dev_stats.tx_drops = tx_drops;
u64_stats_update_end(&adapter->syncp);
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 9e1860d81908..ba030d260940 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -50,12 +50,6 @@
#define DRV_MODULE_GEN_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
-#ifndef DRV_MODULE_GENERATION
-#define DRV_MODULE_GENERATION \
- __stringify(DRV_MODULE_GEN_MAJOR) "." \
- __stringify(DRV_MODULE_GEN_MINOR) "." \
- __stringify(DRV_MODULE_GEN_SUBMINOR) "K"
-#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
@@ -104,8 +98,6 @@
#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
-#define ENA_HASH_KEY_SIZE 40
-
/* The number of tx packet completions that will be handled each NAPI poll
* cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
*/
@@ -137,6 +129,8 @@
#define ENA_IO_IRQ_FIRST_IDX 1
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+#define ENA_ADMIN_POLL_DELAY_US 100
+
/* The ENA device should send a keep-alive msg every 1 sec.
* We wait for 6 sec just to be on the safe side.
*/
@@ -151,8 +145,9 @@
* The buffer size we share with the device is defined to be ENA_PAGE_SIZE
*/
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM)
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
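The extra SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) term in ENA_XDP_MAX_MTU reserves the tail room that xdp_convert_buff_to_frame() (used in the ena_netdev.c hunk above) requires, matching the xdp.frame_sz = ENA_PAGE_SIZE setup. Worked numbers, assuming a 4 KiB ENA_PAGE_SIZE and the typical x86-64 shared-info footprint of 320 bytes: 4096 - 14 (ETH_HLEN) - 4 (ETH_FCS_LEN) - 4 (VLAN_HLEN) - 256 (XDP_PACKET_HEADROOM) - 320 = 3498 bytes of maximum MTU, down from 3818 before this patch.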
@@ -248,6 +243,7 @@ struct ena_stats_tx {
u64 bad_req_id;
u64 llq_buffer_copy;
u64 missed_tx;
+ u64 unmask_interrupt;
};
struct ena_stats_rx {
@@ -333,6 +329,7 @@ struct ena_stats_dev {
u64 interface_down;
u64 admin_q_pause;
u64 rx_drops;
+ u64 tx_drops;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 04fcafcc059c..b514bb1b855d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -154,4 +154,4 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
-#endif /*_ENA_REGS_H_ */
+#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index cf3562e82ca9..50fb66369415 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -536,7 +536,7 @@ void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index 8266b3c1fefc..e53551daeea1 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -241,7 +241,7 @@ struct lance_private {
/* Now the prototypes we export */
int lance_open(struct net_device *dev);
int lance_close(struct net_device *dev);
-int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
void lance_set_multicast(struct net_device *dev);
void lance_tx_timeout(struct net_device *dev, unsigned int txqueue);
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 4e36122609a3..961796abab35 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -156,7 +156,7 @@ struct lance_memory {
struct lance_init_block init;
struct lance_tx_head tx_head[TX_RING_SIZE];
struct lance_rx_head rx_head[RX_RING_SIZE];
- char packet_area[0]; /* packet data follow after the
+ char packet_area[]; /* packet data follow after the
* init block and the ring
* descriptors and are located
* at runtime */
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8b555665a33a..130a105d03f3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -25,6 +25,10 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o \
+ hw_atl2/hw_atl2.o \
+ hw_atl2/hw_atl2_utils.o \
+ hw_atl2/hw_atl2_utils_fw.o \
+ hw_atl2/hw_atl2_llh.o \
macsec/macsec_api.o
atlantic-$(CONFIG_MACSEC) += aq_macsec.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7560f5506e55..52b9833fda99 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -80,8 +80,8 @@
#define AQ_CFG_LOCK_TRYS 100U
-#define AQ_CFG_DRV_AUTHOR "aQuantia"
-#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_AUTHOR "Marvell"
+#define AQ_CFG_DRV_DESC "Marvell (Aquantia) Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index c8c402b013bb..52ad9433cabc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_common.h: Basic includes for all files in project. */
@@ -37,22 +38,31 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
-#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+#define AQ_DEVICE_ID_AQC113DEV 0x00C0
+#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC114CS 0x93C0
+#define AQ_DEVICE_ID_AQC113 0x04C0
+#define AQ_DEVICE_ID_AQC113C 0x14C0
+#define AQ_DEVICE_ID_AQC115C 0x12C0
+
+#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
#define AQ_HWREV_1 1
#define AQ_HWREV_2 2
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-
-#define AQ_NIC_RATE_EEE_10G BIT(6)
-#define AQ_NIC_RATE_EEE_5G BIT(7)
-#define AQ_NIC_RATE_EEE_2GS BIT(8)
-#define AQ_NIC_RATE_EEE_1G BIT(9)
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2G5 BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+#define AQ_NIC_RATE_10M BIT(6)
+
+#define AQ_NIC_RATE_EEE_10G BIT(7)
+#define AQ_NIC_RATE_EEE_5G BIT(8)
+#define AQ_NIC_RATE_EEE_2G5 BIT(9)
+#define AQ_NIC_RATE_EEE_1G BIT(10)
+#define AQ_NIC_RATE_EEE_100M BIT(11)
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 7241cf92b43a..743d3b13b39d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -88,13 +88,13 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InDroppedDma",
};
-static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
- "Queue[%d] InPackets",
- "Queue[%d] OutPackets",
- "Queue[%d] Restarts",
- "Queue[%d] InJumboPackets",
- "Queue[%d] InLroPackets",
- "Queue[%d] InErrors",
+static const char * const aq_ethtool_queue_stat_names[] = {
+ "%sQueue[%d] InPackets",
+ "%sQueue[%d] OutPackets",
+ "%sQueue[%d] Restarts",
+ "%sQueue[%d] InJumboPackets",
+ "%sQueue[%d] InLroPackets",
+ "%sQueue[%d] InErrors",
};
#if IS_ENABLED(CONFIG_MACSEC)
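Each entry in aq_ethtool_queue_stat_names now begins with %s so a traffic-class prefix can be spliced in when QoS is enabled (the prefix stays empty otherwise), and the array becomes an array of string pointers rather than fixed-width ETH_GSTRING_LEN buffers. Rendering sketch using the first entry (values illustrative):

	char name[ETH_GSTRING_LEN];
	char tc_string[8] = "";		/* left empty when cfg->is_qos is false */

	snprintf(tc_string, sizeof(tc_string), "TC%d ", 1);
	snprintf(name, sizeof(name), "%sQueue[%d] InPackets", tc_string, 4);
	/* name == "TC1 Queue[4] InPackets" */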
@@ -166,7 +166,8 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev)
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
- ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
+ cfg->tcs;
#if IS_ENABLED(CONFIG_MACSEC)
if (nic->macsec_cfg) {
@@ -223,7 +224,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
@@ -231,24 +232,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
int sa;
#endif
- cfg = aq_nic_get_cfg(aq_nic);
+ cfg = aq_nic_get_cfg(nic);
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ char tc_string[8];
+ int tc;
+
+ memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
- for (i = 0; i < cfg->vecs; i++) {
- for (si = 0;
- si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
- si++) {
- snprintf(p, ETH_GSTRING_LEN,
- aq_ethtool_queue_stat_names[si], i);
- p += ETH_GSTRING_LEN;
+
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ if (cfg->is_qos)
+ snprintf(tc_string, 8, "TC%d ", tc);
+
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0; si < stat_cnt; si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_stat_names[si],
+ tc_string,
+ AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
+ p += ETH_GSTRING_LEN;
+ }
}
}
#if IS_ENABLED(CONFIG_MACSEC)
- if (!aq_nic->macsec_cfg)
+ if (!nic->macsec_cfg)
break;
memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
@@ -256,7 +268,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_txsc *aq_txsc;
- if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
+ if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy)))
continue;
for (si = 0;
@@ -266,7 +278,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
aq_macsec_txsc_stat_names[si], i);
p += ETH_GSTRING_LEN;
}
- aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
+ aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
continue;
@@ -283,10 +295,10 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_rxsc *aq_rxsc;
- if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
+ if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy)))
continue;
- aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
continue;
@@ -302,6 +314,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
}
#endif
break;
+ }
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
sizeof(aq_ethtool_priv_flag_names));
@@ -605,12 +618,15 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
if (speed & AQ_NIC_RATE_EEE_10G)
rate |= SUPPORTED_10000baseT_Full;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= SUPPORTED_2500baseX_Full;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= SUPPORTED_1000baseT_Full;
+ if (speed & AQ_NIC_RATE_EEE_100M)
+ rate |= SUPPORTED_100baseT_Full;
+
return rate;
}
@@ -777,8 +793,6 @@ static int aq_set_ringparam(struct net_device *ndev,
dev_close(ndev);
}
- aq_nic_free_vectors(aq_nic);
-
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
@@ -787,15 +801,10 @@ static int aq_set_ringparam(struct net_device *ndev,
cfg->txds = min(cfg->txds, hw_caps->txds_max);
cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
- for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
- aq_nic->aq_vecs++) {
- aq_nic->aq_vec[aq_nic->aq_vecs] =
- aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
- if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
- err = -ENOMEM;
- goto err_exit;
- }
- }
+ err = aq_nic_realloc_vectors(aq_nic);
+ if (err)
+ goto err_exit;
+
if (ndev_running)
err = dev_open(ndev, NULL);
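A note on the aq_ethtool.c hunks above: the per-queue stat names gained a leading "%s" so that, when QoS is enabled, each entry can be prefixed with its traffic class, and the queue number now comes from the TC/vector-to-ring mapping. A minimal userspace sketch of how one rendered string looks (the buffer size and the TC/vector values are illustrative, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
        char buf[32];                    /* stand-in for ETH_GSTRING_LEN */
        const char *tc_string = "TC1 ";  /* empty string when QoS is off */
        int ring = 1 * 8 + 2;            /* TC 1, vector 2, 8 rings per TC */

        snprintf(buf, sizeof(buf), "%sQueue[%d] InPackets", tc_string, ring);
        puts(buf);                       /* "TC1 Queue[10] InPackets" */
        return 0;
    }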
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 03ff92bc4a7f..1bc4d33a0ce5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -153,6 +153,8 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
+
if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
fsp->location > AQ_RX_LAST_LOC_FVLANID) {
netdev_err(aq_nic->ndev,
@@ -170,10 +172,10 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
return -EINVAL;
}
- if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
- aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ cfg->num_rss_queues * cfg->tcs - 1);
return -EINVAL;
}
return 0;
@@ -262,6 +264,7 @@ static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
+ struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
bool rule_is_not_correct = false;
if (!aq_nic) {
@@ -274,11 +277,11 @@ aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
} else if (aq_check_filter(aq_nic, fsp)) {
rule_is_not_correct = true;
} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
- if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: The specified action is invalid.\n"
"Maximum allowable value action is %u.\n",
- aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ cfg->num_rss_queues * cfg->tcs - 1);
rule_is_not_correct = true;
}
}
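Both filter checks in aq_filters.c now scale the accepted ring_cookie range by the TC count (note that aq_check_approve_fvlan keeps a '>' comparison while aq_rule_is_not_correct uses '>='). A small standalone illustration of the stricter '>=' bound, with assumed configuration values:

    #include <stdio.h>

    static int cookie_in_range(unsigned long long cookie,
                               unsigned int num_rss_queues, unsigned int tcs)
    {
        return cookie < (unsigned long long)num_rss_queues * tcs;
    }

    int main(void)
    {
        /* 4 RSS queues * 2 TCs -> valid cookies are 0..7 */
        printf("%d %d\n", cookie_in_range(7, 4, 2), cookie_in_range(8, 4, 2));
        return 0;                        /* prints "1 0" */
    }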
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 7d71bc7dc500..ed5b465bc664 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,12 @@
#define AQ_HW_MAC_COUNTER_HZ 312500000ll
#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+enum aq_tc_mode {
+ AQ_TC_MODE_INVALID = -1,
+ AQ_TC_MODE_8TCS,
+ AQ_TC_MODE_4TCS,
+};
+
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
@@ -29,6 +35,9 @@
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+/* Used for rate to Mbps conversion */
+#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -46,7 +55,7 @@ struct aq_hw_caps_s {
u32 mac_regs_count;
u32 hw_alive_check_addr;
u8 msix_irqs;
- u8 tcs;
+ u8 tcs_max;
u8 rxd_alignment;
u8 rxd_size;
u8 txd_alignment;
@@ -55,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
+ u32 priv_data_len;
};
struct aq_hw_link_status_s {
@@ -117,8 +127,11 @@ struct aq_stats_s {
#define AQ_HW_TXD_MULTIPLE 8U
#define AQ_HW_RXD_MULTIPLE 8U
+#define AQ_HW_QUEUES_MAX 32U
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+#define AQ_HW_PTP_TC 2U
+
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
@@ -136,6 +149,19 @@ enum aq_priv_flags {
BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
+#define ATL_HW_CHIP_MIPS 0x00000001U
+#define ATL_HW_CHIP_TPO2 0x00000002U
+#define ATL_HW_CHIP_RPF2 0x00000004U
+#define ATL_HW_CHIP_MPI_AQ 0x00000010U
+#define ATL_HW_CHIP_ATLANTIC 0x00800000U
+#define ATL_HW_CHIP_REVISION_A0 0x01000000U
+#define ATL_HW_CHIP_REVISION_B0 0x02000000U
+#define ATL_HW_CHIP_REVISION_B1 0x04000000U
+#define ATL_HW_CHIP_ANTIGUA 0x08000000U
+
+#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
+ (_HW_)->chip_features))
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -159,6 +185,7 @@ struct aq_hw_s {
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
u16 phy_id;
+ void *priv;
};
struct aq_ring_s;
@@ -182,6 +209,11 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_soft_reset)(struct aq_hw_s *self);
+
+ int (*hw_prepare)(struct aq_hw_s *self,
+ const struct aq_fw_ops **fw_ops);
+
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
@@ -248,21 +280,19 @@ struct aq_hw_ops {
int (*hw_rss_hash_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
+ int (*hw_tc_rate_limit_set)(struct aq_hw_s *self);
+
int (*hw_get_regs)(struct aq_hw_s *self,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
- int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ u32 (*hw_get_fw_version)(struct aq_hw_s *self);
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
- int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
-
- int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
-
int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
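The new ATL_HW_IS_CHIP_FEATURE() above relies on token pasting to pick one of the ATL_HW_CHIP_* bits. A self-contained copy of the macro showing how a call expands (the struct here is a stub, not the driver's aq_hw_s):

    #define ATL_HW_CHIP_ANTIGUA 0x08000000U
    #define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
            (_HW_)->chip_features))

    struct hw_stub { unsigned int chip_features; };

    int main(void)
    {
        struct hw_stub hw = { .chip_features = ATL_HW_CHIP_ANTIGUA };

        /* Expands to !!(0x08000000U & hw.chip_features) -> 1 */
        return ATL_HW_IS_CHIP_FEATURE(&hw, ANTIGUA) ? 0 : 1;
    }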
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 7dbf49adcea6..342c5179f846 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -79,3 +79,29 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
err_exit:
return err;
}
+
+int aq_hw_num_tcs(struct aq_hw_s *hw)
+{
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 8;
+ case AQ_TC_MODE_4TCS:
+ return 4;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+int aq_hw_q_per_tc(struct aq_hw_s *hw)
+{
+ switch (hw->aq_nic_cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ return 4;
+ case AQ_TC_MODE_4TCS:
+ return 8;
+ default:
+ return 4;
+ }
+}
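The two helpers are deliberately inverse: 8-TC mode gives 4 queues per TC and 4-TC mode gives 8, so either way the product is 32, matching the AQ_HW_QUEUES_MAX added in aq_hw.h. A standalone restatement that checks the invariant:

    enum aq_tc_mode { AQ_TC_MODE_INVALID = -1, AQ_TC_MODE_8TCS, AQ_TC_MODE_4TCS };

    static int num_tcs(enum aq_tc_mode m)
    {
        return m == AQ_TC_MODE_8TCS ? 8 : m == AQ_TC_MODE_4TCS ? 4 : 1;
    }

    static int q_per_tc(enum aq_tc_mode m)
    {
        return m == AQ_TC_MODE_4TCS ? 8 : 4;
    }

    int main(void)
    {
        return (num_tcs(AQ_TC_MODE_8TCS) * q_per_tc(AQ_TC_MODE_8TCS) == 32 &&
                num_tcs(AQ_TC_MODE_4TCS) * q_per_tc(AQ_TC_MODE_4TCS) == 32) ? 0 : 1;
    }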
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 9ef82d487e01..32aa5f2fb840 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -34,5 +34,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
+int aq_hw_num_tcs(struct aq_hw_s *hw);
+int aq_hw_q_per_tc(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 0b3e234a54aa..4a6dfac857ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -401,7 +401,7 @@ static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
break;
default:
break;
- };
+ }
return result;
}
@@ -417,7 +417,7 @@ static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
return sc_idx;
default:
WARN_ONCE(true, "Invalid sc_sa");
- };
+ }
return sc_idx;
}
@@ -478,7 +478,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
set_bit(txsc_idx, &cfg->txsc_idx_busy);
- return 0;
+ return ret;
}
static int aq_mdo_upd_secy(struct macsec_context *ctx)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 9fcab646cbd5..8a1da044e908 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -12,11 +12,13 @@
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
+#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
@@ -38,7 +40,7 @@ struct net_device *aq_ndev_alloc(void)
struct net_device *ndev = NULL;
struct aq_nic_s *aq_nic = NULL;
- ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
if (!ndev)
return NULL;
@@ -330,6 +332,73 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
return 0;
}
+static int aq_validate_mqprio_opt(struct aq_nic_s *self,
+ struct tc_mqprio_qopt_offload *mqprio,
+ const unsigned int num_tc)
+{
+ const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
+ const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
+ AQ_CFG_TCS_MAX);
+
+ if (num_tc > tcs_max) {
+ netdev_err(self->ndev, "Too many TCs requested\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_tc != 0 && !is_power_of_2(num_tc)) {
+ netdev_err(self->ndev, "TC count should be power of 2\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ netdev_err(self->ndev, "Min tx rate is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ bool has_min_rate;
+ bool has_max_rate;
+ int err;
+ int i;
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < mqprio->qopt.num_tc; i++) {
+ if (has_max_rate) {
+ u64 max_rate = mqprio->max_rate[i];
+
+ do_div(max_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
+ }
+
+ if (has_min_rate) {
+ u64 min_rate = mqprio->min_rate[i];
+
+ do_div(min_rate, AQ_MBPS_DIVISOR);
+ aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
+ }
+ }
+
+ return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
+ mqprio->qopt.prio_tc_map);
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -341,6 +410,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = aq_ndo_setup_tc,
};
static int __init aq_ndev_init_module(void)
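mqprio hands min_rate/max_rate over in bytes per second, which is why aq_ndo_setup_tc() above divides by AQ_MBPS_DIVISOR (125000 = 1000000 / 8) before programming the limits in Mbps. A quick worked example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate = 125000000ULL;   /* 1 Gbit/s expressed in bytes/s */

        rate /= 125000;                 /* AQ_MBPS_DIVISOR */
        printf("%llu Mbps\n", (unsigned long long)rate); /* 1000 Mbps */
        return 0;
    }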
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index a369705a786a..4435c6374f7e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -25,6 +26,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
+#include <net/pkt_cls.h>
static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
@@ -63,10 +65,38 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
+/* Recalculate the number of vectors */
+static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
+ /* cfg->vecs should be a power of 2 for RSS */
+ cfg->vecs = rounddown_pow_of_two(cfg->vecs);
+
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
+ if (cfg->tcs > 2)
+ cfg->vecs = min(cfg->vecs, 4U);
+ }
+
+ if (cfg->vecs <= 4)
+ cfg->tc_mode = AQ_TC_MODE_8TCS;
+ else
+ cfg->tc_mode = AQ_TC_MODE_4TCS;
+
+ /* RSS rings */
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int i;
cfg->tcs = AQ_CFG_TCS_DEF;
@@ -78,7 +108,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
- cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
cfg->fc.req = AQ_CFG_FC_MODE;
cfg->wol = AQ_CFG_WOL_MODES;
@@ -88,29 +117,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+ cfg->is_ptp = true;
/* descriptors */
cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
- /*rss rings */
- cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
- cfg->vecs = min(cfg->vecs, num_online_cpus());
- if (self->irqvecs > AQ_HW_SERVICE_IRQS)
- cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
- /* cfg->vecs should be power of 2 for RSS */
- if (cfg->vecs >= 8U)
- cfg->vecs = 8U;
- else if (cfg->vecs >= 4U)
- cfg->vecs = 4U;
- else if (cfg->vecs >= 2U)
- cfg->vecs = 2U;
- else
- cfg->vecs = 1U;
-
- cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
-
- aq_nic_rss_init(self, cfg->num_rss_queues);
+ aq_nic_cfg_update_num_vecs(self);
cfg->irq_type = aq_pci_func_get_irq_type(self);
@@ -135,6 +148,9 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
cfg->is_vlan_force_promisc = true;
+
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
@@ -180,6 +196,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_enable(self);
#endif
+ if (self->aq_hw_ops->hw_tc_rate_limit_set)
+ self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);
+
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
@@ -257,6 +276,28 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
+static int aq_nic_hw_prepare(struct aq_nic_s *self)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
+ if (err)
+ goto exit;
+
+ err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
+
+exit:
+ return err;
+}
+
+static bool aq_nic_is_valid_ether_addr(const u8 *addr)
+{
+ /* Some engineering samples of Aquantia NICs are provisioned with a
+ * partially populated MAC, which is still invalid.
+ */
+ return !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0);
+}
+
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -266,7 +307,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
- err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ err = aq_nic_hw_prepare(self);
if (err)
goto err_exit;
@@ -281,6 +322,12 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ if (!is_valid_ether_addr(self->ndev->dev_addr) ||
+ !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
+
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
@@ -364,26 +411,35 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
+ self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
}
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
+ err = aq_vec_ring_alloc(aq_vec, self, i,
+ aq_nic_get_cfg(self));
+ if (err)
+ goto err_exit;
+
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ }
- err = aq_ptp_init(self, self->irqvecs - 1);
- if (err < 0)
- goto err_exit;
+ if (aq_nic_get_cfg(self)->is_ptp) {
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
- err = aq_ptp_ring_alloc(self);
- if (err < 0)
- goto err_exit;
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
- err = aq_ptp_ring_init(self);
- if (err < 0)
- goto err_exit;
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+ }
netif_carrier_off(self->ndev);
@@ -394,9 +450,12 @@ err_exit:
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
+ struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
int err = 0;
+ cfg = aq_nic_get_cfg(self);
+
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
@@ -434,7 +493,7 @@ int aq_nic_start(struct aq_nic_s *self)
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
- if (self->aq_nic_cfg.is_polling) {
+ if (cfg->is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -452,16 +511,16 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- if (self->aq_nic_cfg.link_irq_vec) {
+ if (cfg->link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
- self->aq_nic_cfg.link_irq_vec);
+ cfg->link_irq_vec);
err = request_threaded_irq(irqvec, NULL,
aq_linkstate_threaded_isr,
IRQF_SHARED | IRQF_ONESHOT,
self->ndev->name, self);
if (err < 0)
goto err_exit;
- self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
+ self->msix_entry_mask |= (1 << cfg->link_irq_vec);
}
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
@@ -470,14 +529,21 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_tx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
- err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+ err = netif_set_real_num_rx_queues(self->ndev,
+ self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
+ for (i = 0; i < cfg->tcs; i++) {
+ u16 offset = self->aq_vecs * i;
+
+ netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
+ }
netif_tx_start_all_queues(self->ndev);
err_exit:
@@ -488,6 +554,8 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ struct device *dev = aq_nic_get_dev(self);
struct aq_ring_buff_s *first = NULL;
u8 ipver = ip_hdr(skb)->version;
struct aq_ring_buff_s *dx_buff;
@@ -529,7 +597,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
need_context_tag = true;
}
- if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
+ if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
dx_buff->len_pkt = skb->len;
dx_buff->is_vlan = 1U;
@@ -544,12 +612,12 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
}
dx_buff->len = skb_headlen(skb);
- dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+ dx_buff->pa = dma_map_single(dev,
skb->data,
dx_buff->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
ret = 0;
goto exit;
}
@@ -581,13 +649,13 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
else
buff_size = frag_len;
- frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
+ frag_pa = skb_frag_dma_map(dev,
frag,
buff_offset,
buff_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
+ if (unlikely(dma_mapping_error(dev,
frag_pa)))
goto mapping_error;
@@ -621,12 +689,12 @@ mapping_error:
if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
!dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
- dma_unmap_single(aq_nic_get_dev(self),
+ dma_unmap_single(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
} else {
- dma_unmap_page(aq_nic_get_dev(self),
+ dma_unmap_page(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
@@ -640,15 +708,16 @@ exit:
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
- unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+ unsigned int vec = skb->queue_mapping % cfg->vecs;
+ unsigned int tc = skb->queue_mapping / cfg->vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
int err = NETDEV_TX_OK;
- unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
- ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+ ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];
if (frags > AQ_CFG_SKB_FRAGS_MAX) {
dev_kfree_skb_any(skb);
@@ -657,13 +726,14 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
- if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
+ if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
/* Above status update may stop the queue. Check this. */
- if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
+ if (__netif_subqueue_stopped(self->ndev,
+ AQ_NIC_RING2QMAP(self, ring->idx))) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
@@ -764,6 +834,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
u32 *regs_buff = p;
int err = 0;
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return -EOPNOTSUPP;
+
regs->version = 1;
err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
@@ -778,6 +851,9 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
+ if (unlikely(!self->aq_hw_ops->hw_get_regs))
+ return 0;
+
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
@@ -787,6 +863,7 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
+ unsigned int tc;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
@@ -825,10 +902,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- data += count;
- aq_vec_get_sw_stats(aq_vec, data, &count);
+ for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ aq_vec && self->aq_vecs > i;
+ ++i, aq_vec = self->aq_vec[i]) {
+ data += count;
+ aq_vec_get_sw_stats(aq_vec, tc, data, &count);
+ }
}
data += count;
@@ -873,7 +953,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
@@ -885,6 +965,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
@@ -912,7 +996,7 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
5000baseT_Full);
- if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
2500baseT_Full);
@@ -924,6 +1008,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
+ if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
@@ -954,6 +1042,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
speed = cmd->base.speed;
switch (speed) {
+ case SPEED_10:
+ rate = AQ_NIC_RATE_10M;
+ break;
+
case SPEED_100:
rate = AQ_NIC_RATE_100M;
break;
@@ -963,7 +1055,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
break;
case SPEED_2500:
- rate = AQ_NIC_RATE_2GS;
+ rate = AQ_NIC_RATE_2G5;
break;
case SPEED_5000:
@@ -1006,11 +1098,7 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
- u32 fw_version = 0U;
-
- self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
-
- return fw_version;
+ return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}
int aq_nic_set_loopback(struct aq_nic_s *self)
@@ -1101,9 +1189,11 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
if (!self)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ for (i = 0U; i < self->aq_vecs; i++) {
+ aq_vec = self->aq_vec[i];
aq_vec_deinit(aq_vec);
+ aq_vec_ring_free(aq_vec);
+ }
aq_ptp_unregister(self);
aq_ptp_ring_deinit(self);
@@ -1136,6 +1226,22 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
+int aq_nic_realloc_vectors(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
+
+ aq_nic_free_vectors(self);
+
+ for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
+ cfg);
+ if (unlikely(!self->aq_vec[self->aq_vecs]))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
@@ -1201,3 +1307,98 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
break;
}
}
+
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ const unsigned int prev_vecs = cfg->vecs;
+ bool ndev_running;
+ int err = 0;
+ int i;
+
+ /* Bail out if this is already the current configuration, or if this
+ * is a disable request (tcs == 0) and QoS is already disabled.
+ */
+ if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
+ return 0;
+
+ ndev_running = netif_running(self->ndev);
+ if (ndev_running)
+ dev_close(self->ndev);
+
+ cfg->tcs = tcs;
+ if (cfg->tcs == 0)
+ cfg->tcs = 1;
+ if (prio_tc_map)
+ memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
+ else
+ for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
+ cfg->prio_tc_map[i] = cfg->tcs * i / 8;
+
+ cfg->is_qos = (tcs != 0 ? true : false);
+ cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
+ if (!cfg->is_ptp)
+ netdev_warn(self->ndev, "%s\n",
+ "PTP is auto disabled due to requested TC count.");
+
+ netdev_set_num_tc(self->ndev, cfg->tcs);
+
+ /* Changing the number of TCs might change the number of vectors */
+ aq_nic_cfg_update_num_vecs(self);
+ if (prev_vecs != cfg->vecs) {
+ err = aq_nic_realloc_vectors(self);
+ if (err)
+ goto err_exit;
+ }
+
+ if (ndev_running)
+ err = dev_open(self->ndev, NULL);
+
+err_exit:
+ return err;
+}
+
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (max_rate && max_rate < 10) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "max rate", 10);
+ cfg->tc_max_rate[tc] = 10;
+ } else {
+ cfg->tc_max_rate[tc] = max_rate;
+ }
+
+ return 0;
+}
+
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (tc >= AQ_CFG_TCS_MAX)
+ return -EINVAL;
+
+ if (min_rate)
+ set_bit(tc, &cfg->tc_min_rate_msk);
+ else
+ clear_bit(tc, &cfg->tc_min_rate_msk);
+
+ if (min_rate && min_rate < 20) {
+ netdev_warn(self->ndev,
+ "Setting %s to the minimum usable value of %dMbps.\n",
+ "min rate", 20);
+ cfg->tc_min_rate[tc] = 20;
+ } else {
+ cfg->tc_min_rate[tc] = min_rate;
+ }
+
+ return 0;
+}
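Both rate setters clamp non-zero requests below the hardware's usable floor (10 Mbps for the per-TC max rate, 20 Mbps for the min rate) rather than rejecting them, and a zero keeps the limit disabled. A tiny mimic of that policy, not driver code:

    #include <stdio.h>

    static unsigned int clamp_rate(unsigned int rate_mbps, unsigned int floor_mbps)
    {
        return (rate_mbps && rate_mbps < floor_mbps) ? floor_mbps : rate_mbps;
    }

    int main(void)
    {
        printf("%u\n", clamp_rate(3, 10));  /* max-rate request of 3 -> 10 */
        printf("%u\n", clamp_rate(5, 20));  /* min-rate request of 5 -> 20 */
        printf("%u\n", clamp_rate(0, 20));  /* 0 leaves the limit off */
        return 0;
    }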
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0663b8d0220d..2ab003065e62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -59,8 +59,15 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
+ bool is_qos;
+ bool is_ptp;
+ enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
+ u8 prio_tc_map[8];
+ u32 tc_max_rate[AQ_CFG_TCS_MAX];
+ unsigned long tc_min_rate_msk;
+ u32 tc_min_rate[AQ_CFG_TCS_MAX];
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
};
@@ -77,8 +84,16 @@ struct aq_nic_cfg_s {
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
-#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
- ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \
+ (((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 8 : 4)
+
+#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \
+ ((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_))
+
+#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \
+ ((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \
+ (_NIC_)->aq_vecs + \
+ ((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg)))
struct aq_hw_rx_fl2 {
struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
@@ -104,7 +119,7 @@ struct aq_nic_s {
atomic_t flags;
u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
unsigned int aq_vecs;
@@ -164,6 +179,7 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
+int aq_nic_realloc_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
@@ -181,4 +197,9 @@ void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location);
+int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
+int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 max_rate);
+int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
+ const u32 min_rate);
#endif /* AQ_NIC_H */
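The two new mappings in aq_nic.h are worth tracing once by hand. Assuming 4-TC mode (8 rings per TC) and aq_vecs = 4, TC 1 / vector 2 lands on hardware ring 10, and that ring maps back to netdev queue 6, consistent with the netdev_set_tc_queue() offsets set up in aq_nic_start():

    #include <stdio.h>

    #define RING_PER_TC 8 /* AQ_TC_MODE_4TCS */

    int main(void)
    {
        int aq_vecs = 4;
        int tc = 1, vec = 2;
        int ring = tc * RING_PER_TC + vec;     /* AQ_NIC_CFG_TCVEC2RING -> 10 */
        int queue = ring / RING_PER_TC * aq_vecs +
                    ring % RING_PER_TC;        /* AQ_NIC_RING2QMAP -> 6 */

        printf("ring=%d queue=%d\n", ring, queue);
        return 0;
    }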
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 8a70ffe1d326..41c0f560f95b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
@@ -41,6 +42,13 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+
{}
};
@@ -70,6 +78,13 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC113DEV, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -104,10 +119,8 @@ int aq_pci_func_init(struct pci_dev *pdev)
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
+ if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
- }
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
@@ -237,6 +250,15 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
}
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
+ if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
+ int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
+
+ self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
+ if (!self->aq_hw->priv) {
+ err = -ENOMEM;
+ goto err_free_aq_hw;
+ }
+ }
for (bar = 0; bar < 4; ++bar) {
if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
@@ -245,19 +267,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
break;
}
@@ -265,7 +287,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
- goto err_free_aq_hw;
+ goto err_free_aq_hw_priv;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -305,6 +327,8 @@ err_register:
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
+err_free_aq_hw_priv:
+ kfree(self->aq_hw->priv);
err_free_aq_hw:
kfree(self->aq_hw);
err_ioremap:
@@ -332,6 +356,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw->priv);
kfree(self->aq_hw);
pci_release_regions(pdev);
free_netdev(self->ndev);
@@ -406,6 +431,9 @@ static int atl_resume_common(struct device *dev, bool deep)
netif_tx_start_all_queues(nic->ndev);
err_exit:
+ if (ret < 0)
+ aq_nic_deinit(nic, true);
+
rtnl_unlock();
return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 58e8c641e8b3..599ced261b2a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -945,26 +945,29 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
+/* Index must be 8 (8 TCs) or 16 (4 TCs).
+ * It depends on Traffic Class mode.
+ */
+static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
+{
+ if (tc_mode == AQ_TC_MODE_8TCS)
+ return PTP_8TC_RING_IDX;
+
+ return PTP_4TC_RING_IDX;
+}
+
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
struct aq_ring_s *hwts;
- u32 tx_tc_mode, rx_tc_mode;
struct aq_ring_s *ring;
int err;
if (!aq_ptp)
return 0;
- /* Index must to be 8 (8 TCs) or 16 (4 TCs).
- * It depends from Traffic Class mode.
- */
- aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
- if (tx_tc_mode == 0)
- tx_ring_idx = PTP_8TC_RING_IDX;
- else
- tx_ring_idx = PTP_4TC_RING_IDX;
+ tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
@@ -973,11 +976,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
goto err_exit;
}
- aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
- if (rx_tc_mode == 0)
- rx_ring_idx = PTP_8TC_RING_IDX;
- else
- rx_ring_idx = PTP_4TC_RING_IDX;
+ rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
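The two PTP constants fall out of the ring layout: PTP traffic lives on TC 2 (AQ_HW_PTP_TC), and the first ring of TC 2 is 2 * 4 = 8 in 8-TC mode and 2 * 8 = 16 in 4-TC mode. A minimal restatement of ptp_ring_idx() in those terms:

    enum aq_tc_mode { AQ_TC_MODE_8TCS, AQ_TC_MODE_4TCS };

    static unsigned int ptp_ring_idx(enum aq_tc_mode mode)
    {
        unsigned int rings_per_tc = (mode == AQ_TC_MODE_8TCS) ? 4 : 8;

        return 2 /* AQ_HW_PTP_TC */ * rings_per_tc;
    }

    int main(void)
    {
        return (ptp_ring_idx(AQ_TC_MODE_8TCS) == 8 &&
                ptp_ring_idx(AQ_TC_MODE_4TCS) == 16) ? 0 : 1;
    }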
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index bae95a618560..68fdb3994088 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -232,8 +232,11 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
- if (__netif_subqueue_stopped(ndev, ring->idx)) {
- netif_wake_subqueue(ndev, ring->idx);
+ if (__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx))) {
+ netif_wake_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
ring->stats.tx.queue_restarts++;
}
}
@@ -242,8 +245,11 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
- if (!__netif_subqueue_stopped(ndev, ring->idx))
- netif_stop_subqueue(ndev, ring->idx);
+ if (!__netif_subqueue_stopped(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic,
+ ring->idx)))
+ netif_stop_subqueue(ndev,
+ AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}
bool aq_ring_tx_clean(struct aq_ring_s *self)
@@ -466,7 +472,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
/* Send all PTP traffic to queue 0 */
- skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(self->aq_nic,
+ self->idx));
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f40a427970dc..d1d43c8ce400 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -103,16 +103,11 @@ err_exit:
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
- struct aq_ring_s *ring = NULL;
struct aq_vec_s *self = NULL;
- unsigned int i = 0U;
- int err = 0;
self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self) {
- err = -ENOMEM;
+ if (!self)
goto err_exit;
- }
self->aq_nic = aq_nic;
self->aq_ring_param.vec_idx = idx;
@@ -128,10 +123,20 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+err_exit:
+ return self;
+}
+
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
for (i = 0; i < aq_nic_cfg->tcs; ++i) {
- unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
- self->tx_rings,
- self->aq_ring_param.vec_idx);
+ const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
+ i, idx);
ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
idx_ring, aq_nic_cfg);
@@ -156,11 +161,11 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
err_exit:
if (err < 0) {
- aq_vec_free(self);
+ aq_vec_ring_free(self);
self = NULL;
}
- return self;
+ return err;
}
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
@@ -270,6 +275,18 @@ err_exit:;
void aq_vec_free(struct aq_vec_s *self)
{
+ if (!self)
+ goto err_exit;
+
+ netif_napi_del(&self->napi);
+
+ kfree(self);
+
+err_exit:;
+}
+
+void aq_vec_ring_free(struct aq_vec_s *self)
+{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
@@ -279,13 +296,12 @@ void aq_vec_free(struct aq_vec_s *self)
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ if (i < self->rx_rings)
+ aq_ring_free(&ring[AQ_VEC_RX_ID]);
}
- netif_napi_del(&self->napi);
-
- kfree(self);
-
+ self->tx_rings = 0;
+ self->rx_rings = 0;
err_exit:;
}
@@ -333,16 +349,14 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
return &self->aq_ring_param.affinity_mask;
}
-void aq_vec_add_stats(struct aq_vec_s *self,
- struct aq_ring_stats_rx_s *stats_rx,
- struct aq_ring_stats_tx_s *stats_tx)
+static void aq_vec_add_stats(struct aq_vec_s *self,
+ const unsigned int tc,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx)
{
- struct aq_ring_s *ring = NULL;
- unsigned int r = 0U;
+ struct aq_ring_s *ring = self->ring[tc];
- for (r = 0U, ring = self->ring[0];
- self->tx_rings > r; ++r, ring = self->ring[r]) {
- struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
+ if (tc < self->rx_rings) {
struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
stats_rx->packets += rx->packets;
@@ -353,6 +367,10 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
+ }
+
+ if (tc < self->tx_rings) {
+ struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
@@ -361,7 +379,8 @@ void aq_vec_add_stats(struct aq_vec_s *self,
}
}
-int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
+int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
+ unsigned int *p_count)
{
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
@@ -369,7 +388,8 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
- aq_vec_add_stats(self, &stats_rx, &stats_tx);
+
+ aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
/* This data should mimic the aq_ethtool_queue_stat_names structure */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 0fe8e0904c7f..541af85e6510 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -25,17 +25,17 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
+void aq_vec_ring_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
-int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
+int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count);
-void aq_vec_add_stats(struct aq_vec_s *self,
- struct aq_ring_stats_rx_s *stats_rx,
- struct aq_ring_stats_tx_s *stats_tx);
#endif /* AQ_VEC_H */
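The aq_vec.h changes make the new split explicit: aq_vec_alloc()/aq_vec_free() own the vector object and its NAPI context, while aq_vec_ring_alloc()/aq_vec_ring_free() attach and detach the rings, so rings can be rebuilt (for example after a TC change) without destroying the vector. A toy mimic of that lifecycle, not driver code:

    #include <stdlib.h>

    struct vec_stub { int tx_rings; };

    static struct vec_stub *vec_alloc(void)
    {
        return calloc(1, sizeof(struct vec_stub));
    }

    static void vec_ring_alloc(struct vec_stub *v, int n) { v->tx_rings = n; }
    static void vec_ring_free(struct vec_stub *v) { v->tx_rings = 0; }

    int main(void)
    {
        struct vec_stub *v = vec_alloc();

        if (!v)
            return 1;
        vec_ring_alloc(v, 4);  /* aq_nic_init() path */
        vec_ring_free(v);      /* aq_nic_deinit() keeps the vector alive */
        vec_ring_alloc(v, 8);  /* e.g. re-init after a TC reconfiguration */
        vec_ring_free(v);
        free(v);               /* final aq_vec_free() */
        return 0;
    }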
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9b1062b8af64..a312864969af 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
@@ -20,7 +21,7 @@
.msix_irqs = 4U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
+ .tcs_max = HW_ATL_A0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_A0_RXD_SIZE, \
.rxds_max = HW_ATL_A0_MAX_RXD, \
@@ -47,7 +48,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -57,7 +58,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -66,7 +67,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -74,7 +75,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -135,10 +136,10 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
@@ -267,8 +268,7 @@ static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
- 0x00010000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00007040U, 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -886,6 +886,8 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
}
const struct aq_hw_ops hw_atl_ops_a0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
.hw_reset = hw_atl_a0_hw_reset,
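One detail that is easy to misread in the a0 hunk above: the constants in the TPS setter calls did not change value, the setters' parameter order did, with the TC index now coming before the credit/weight value. A hypothetical signature illustrating the new convention (guessed from the call sites, not the driver's actual prototype):

    #include <stdio.h>

    static void tc_data_max_credit_set(unsigned int tc, unsigned int credit)
    {
        printf("TC%u: max credit <- 0x%X\n", tc, credit);
    }

    int main(void)
    {
        tc_data_max_credit_set(0U, 0xFFF); /* was written as (0xFFF, 0U) */
        return 0;
    }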
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d20d91cdece8..14d79f70cad7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -22,7 +23,7 @@
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
+ .tcs_max = HW_ATL_B0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_B0_RXD_SIZE, \
.rxds_max = HW_ATL_B0_MAX_RXD, \
@@ -45,7 +46,8 @@
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
- NETIF_F_GSO_PARTIAL, \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -59,7 +61,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -69,7 +71,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -78,7 +80,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
- AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -86,7 +88,7 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = AQ_NIC_RATE_2GS |
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
};
@@ -113,12 +115,34 @@ static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
return 0;
}
+static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
+{
+ /* Init TC2 for PTP_TX */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ AQ_HW_PTP_TC);
+
+ /* Init TC2 for PTP_RX */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ AQ_HW_PTP_TC);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);
+
+ return aq_hw_err_from_flags(self);
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
- unsigned int i_priority = 0U;
- u32 buff_size = 0U;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
+ unsigned int prio = 0U;
u32 tc = 0U;
+ if (cfg->is_ptp) {
+ tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
+ rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
+ }
+
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
@@ -126,69 +150,45 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* TPS VM init */
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
- /* TPS TC credits init */
- hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
-
- tc = 0;
-
- /* TX Packet Scheduler Data TC0 */
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
-
- /* Tx buf size TC0 */
- buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
-
- hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024 / 32U) * 66U) /
- 100U, tc);
- hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024 / 32U) * 50U) /
- 100U, tc);
- /* Init TC2 for PTP_TX */
- tc = 2;
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
- hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
- tc);
+ /* Tx buf size TC0 */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
- /* QoS Rx buf size per TC */
- tc = 0;
- buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
- hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
- hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
- /* Init TC2 for PTP_RX */
- tc = 2;
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
- hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
- tc);
- /* No flow control for PTP */
- hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
+ }
+
+ if (cfg->is_ptp)
+ hw_atl_b0_tc_ptp_set(self);
/* QoS 802.1p priority -> TC mapping */
- for (i_priority = 8U; i_priority--;)
- hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
- struct aq_rss_parameters *rss_params)
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int addr = 0U;
@@ -251,9 +251,10 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
+ u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
unsigned int i;
/* TX checksums offloads*/
@@ -261,10 +262,8 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
- NETIF_F_RXCSUM));
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -272,7 +271,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
/* Outer VLAN tag offload */
hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
-/* LRO offloads */
+ /* LRO offloads */
{
unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
@@ -311,10 +310,124 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+ /* By default, max_credit is based on the MTU (in units of 64 bytes) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
+
+ /* WSP, if min_rate is set for at least one TC.
+ * RR otherwise.
+ */
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x64;
+
+ max_credit = max(8 * weight, fixed_max_credit);
+ } else {
+ weight = 0x64;
+ max_credit = 0xFFF;
+ }
+
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
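
Two pieces of arithmetic in hw_atl_b0_hw_init_tx_tc_rate_limit() deserve unpacking: the tc_weight expression is a ceiling division, ceil(min_rate * max_weight / link_speed), written as (link_speed - 1 + ...) / link_speed; and the per-descriptor limiter is a fixed-point divider of the nominal 10G rate, with a 10-bit integer part (rate_x) and 14 fractional bits (rate_y, per the HW_ATL_TPS_DESC_RATE_X/Y widths defined further down). A standalone sketch, assuming a 14-bit weight field purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define RATE_Y_WIDTH 14U	/* HW_ATL_TPS_DESC_RATE_Y_WIDTH */

int main(void)
{
	/* ceil(min_rate * max_weight / link_speed), rates in Mbps */
	uint32_t link_speed = 10000U;		/* link up at 10G */
	uint32_t min_rate = 2500U;		/* hypothetical TC min rate */
	uint32_t max_weight = (1U << 14) - 1U;	/* assumed field width */
	uint32_t weight = (link_speed - 1U + min_rate * max_weight) /
			  link_speed;

	/* 10.14 fixed point: a 2.5G cap on the 10G nominal rate is 4.0 */
	uint32_t rate = 10000U * (1U << RATE_Y_WIDTH) / 2500U;
	uint32_t rate_x = rate >> RATE_Y_WIDTH;			/* integer: 4 */
	uint32_t rate_y = rate & ((1U << RATE_Y_WIDTH) - 1U);	/* frac: 0 */

	printf("weight=%u x=%u y=%u\n", weight, rate_x, rate_y);
	return 0;
}
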
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
/* Tx TC/Queue number config */
- hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
@@ -324,7 +437,7 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
0x00010000U : 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
@@ -334,20 +447,32 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;
+
+ if (cfg->is_rss)
+ rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
+ HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
+ HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;
+
+ hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
+}
+
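
The three control words this helper selects from are defined in hw_atl_b0_internal.h further down; going by their names, the enabled values encode the number of RSS indirection index bits per TC: 2 bits (4 rings per TC) in 8-TC mode, 3 bits (8 rings per TC) in 4-TC mode, 32 rings either way. A plain-C mirror of the selection, under that reading:

#include <stdint.h>
#include <stdbool.h>

/* mirrors hw_atl_b0_hw_init_rx_rss_ctrl1()'s value selection */
static uint32_t rss_ctrl1_value(bool is_rss, bool eight_tcs)
{
	if (!is_rss)
		return 0x00000000U;	/* HW_ATL_RSS_DISABLED */

	return eight_tcs ? 0xA2222222U	/* 8 TCs, 2 index bits */
			 : 0x80003333U;	/* 4 TCs, 3 index bits */
}
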
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int i;
/* Rx TC/RSS number config */
- hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
/* Rx flow control */
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
- 0xB3333333U : 0x00000000U);
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
@@ -372,8 +497,8 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- aq_hw_write_reg(self, 0x00005040U,
- IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+ aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
+ 0x000F0000U : 0x00000000U);
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
@@ -384,7 +509,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -479,23 +604,21 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
@@ -511,9 +634,8 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
return 0;
}
-static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int frags)
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags)
{
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
@@ -600,9 +722,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
@@ -643,9 +764,8 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
- struct aq_ring_s *aq_ring,
- struct aq_ring_param_s *aq_ring_param)
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
@@ -673,9 +793,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
- struct aq_ring_s *ring,
- unsigned int sw_tail_old)
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
{
for (; sw_tail_old != ring->sw_tail;
sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
@@ -734,8 +853,8 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
{
unsigned int hw_head_;
int err = 0;
@@ -753,8 +872,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
@@ -854,14 +972,14 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
@@ -871,7 +989,7 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
@@ -880,8 +998,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
-static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
- unsigned int packet_filter)
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
@@ -1071,34 +1189,20 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
- struct aq_ring_s *ring)
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
-{
- *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
- return aq_hw_err_from_flags(self);
-}
-
-static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
-{
- *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
- return aq_hw_err_from_flags(self);
-}
-
#define get_ptp_ts_val_u64(self, indx) \
((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
@@ -1478,6 +1582,8 @@ static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
}
const struct aq_hw_ops hw_atl_ops_b0 = {
+ .hw_soft_reset = hw_atl_utils_soft_reset,
+ .hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
.hw_reset = hw_atl_b0_hw_reset,
@@ -1510,13 +1616,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit,
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
- .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
-
.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 09af1683034b..30f468f2084d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
@@ -33,4 +34,41 @@ extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
+int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int sw_tail_old);
+int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
+ unsigned int frags);
+int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
+
+void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+
+int hw_atl_b0_hw_start(struct aq_hw_s *self);
+
+int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
+int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
+
+int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 7ab23a1751d3..cf460d61a45e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -75,7 +75,7 @@
#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
#define HW_ATL_B0_TCRSS_4_8 1
-#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_TC_MAX 8U
#define HW_ATL_B0_RSS_MAX 8U
#define HW_ATL_B0_LRO_RXD_MAX 16U
@@ -151,6 +151,10 @@
#define HW_ATL_B0_MAX_RXD 8184U
#define HW_ATL_B0_MAX_TXD 8184U
+#define HW_ATL_RSS_DISABLED 0x00000000U
+#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U
+#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index d1f68fc16291..3c8e8047ea1e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -693,6 +693,13 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
@@ -747,7 +754,7 @@ void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
}
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+ u32 user_priority, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
@@ -766,10 +773,9 @@ void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
};
- aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
- rpf_rpb_rx_tc_upt_msk[tc],
- rpf_rpb_rx_tc_upt_shft[tc],
- user_priority_tc_map);
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority],
+ rpf_rpb_rx_tc_upt_msk[user_priority],
+ rpf_rpb_rx_tc_upt_shft[user_priority], tc);
}
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
@@ -867,6 +873,13 @@ void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
vlan_prom_mode_en);
}
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT);
+}
+
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets)
{
@@ -1304,14 +1317,14 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
HW_ATL_TPB_TX_TC_MODE_MSK,
HW_ATL_TPB_TX_TC_MODE_SHIFT);
}
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
@@ -1450,8 +1463,8 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc)
+ const u32 tc,
+ const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
@@ -1460,13 +1473,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
- u32 tc)
+ const u32 tc,
+ const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
- tx_pkt_shed_desc_tc_weight);
+ weight);
}
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
@@ -1479,8 +1492,8 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc)
+ const u32 tc,
+ const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
@@ -1489,13 +1502,49 @@ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
- u32 tc)
+ const u32 tc,
+ const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
- tx_pkt_shed_tc_data_weight);
+ weight);
+}
+
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_MSK,
+ HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT,
+ rate_mode);
+}
+
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_EN_MSK,
+ HW_ATL_TPS_DESC_RATE_EN_SHIFT,
+ enable);
+}
+
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_X_MSK,
+ HW_ATL_TPS_DESC_RATE_X_SHIFT,
+ rate_int);
+}
+
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc),
+ HW_ATL_TPS_DESC_RATE_Y_MSK,
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT,
+ rate_frac);
}
/* tx */
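
Every setter in this file funnels through aq_hw_write_reg_bit(), a read-modify-write of a single bitfield. A plain-C sketch of the pattern (the real helper takes the hardware handle and a register address rather than a pointer):

#include <stdint.h>

/* clear the field selected by msk, then place val at shift inside it */
static inline void write_reg_bit(uint32_t *reg, uint32_t msk,
				 uint32_t shift, uint32_t val)
{
	*reg = (*reg & ~msk) | ((val << shift) & msk);
}
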
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 62992b23c0e8..61a6f70c51cd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -349,6 +349,9 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter);
+/* get l2 promiscuous mode enable */
+u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);
+
/* set l2 promiscuous mode enable */
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
@@ -420,6 +423,9 @@ void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en);
+/* Get VLAN promiscuous mode enable */
+u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);
+
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act);
@@ -610,11 +616,11 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
/* set TX Traffic Class Mode */
-void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
/* get TX Traffic Class Mode */
-u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -682,13 +688,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler descriptor tc max credit */
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc);
+ const u32 tc,
+ const u32 max_credit);
/* set tx packet scheduler descriptor tc weight */
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
- u32 tc);
+ const u32 tc,
+ const u32 weight);
/* set tx packet scheduler descriptor vm arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
@@ -696,13 +702,29 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler tc data max credit */
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 max_credit,
- u32 tc);
+ const u32 tc,
+ const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
- u32 tc);
+ const u32 tc,
+ const u32 weight);
+
+/* set tx descriptor rate mode */
+void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
+ const u32 rate_mode);
+
+/* set tx packet scheduler descriptor rate enable */
+void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 enable);
+
+/* set tx packet scheduler descriptor rate integral value */
+void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_int);
+
+/* set tx packet scheduler descriptor rate fractional value */
+void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
+ const u32 rate_frac);
/* tx */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 18de2f7b8959..06220792daf1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2038,6 +2038,42 @@
/* default value of bitfield lso_tcp_flag_mid[b:0] */
#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
+/* tx tx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_mode".
+ * port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i"
+ */
+
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
+/* tx tx_desc_rate_mode bitfield definitions
+ * preprocessor definitions for the bitfield "tx_desc_rate_mode".
+ * port="pif_tps_desc_rate_mode_i"
+ */
+
+/* register address for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900
+/* bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080
+/* inverted bitmask for bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F
+/* lower bit position of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7
+/* width of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1
+/* default value of bitfield tx_desc_rate_mode */
+#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0
+
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
* port="pif_tpb_tx_buf_en_i"
@@ -2056,19 +2092,6 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
-/* register address for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
-/* bitmask for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
-/* inverted bitmask for bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
-/* lower bit position of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
-/* width of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
-/* default value of bitfield tx_tc_mode */
-#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
-
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
@@ -2270,6 +2293,58 @@
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+/* tx desc{r}_rate_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_en".
+ * port="pif_tps_desc_rate_en_i[0]"
+ */
+
+/* register address for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000
+/* inverted bitmask for bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF
+/* lower bit position of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31
+/* width of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1
+/* default value of bitfield desc{r}_rate_en */
+#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0
+
+/* tx desc{r}_rate_x bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_x".
+ * port="pif_tps_desc0_rate_x"
+ */
+/* register address for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000
+/* inverted bitmask for bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF
+/* lower bit position of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16
+/* width of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10
+/* default value of bitfield desc{r}_rate_x */
+#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0
+
+/* tx desc{r}_rate_y bitfield definitions
+ * preprocessor definitions for the bitfield "desc{r}_rate_y".
+ * port="pif_tps_desc0_rate_y"
+ */
+/* register address for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10)
+/* bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF
+/* inverted bitmask for bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000
+/* lower bit position of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0
+/* width of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14
+/* default value of bitfield desc{r}_rate_y */
+#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0
+
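
Per the masks above, the three rate fields of one descriptor queue share a single register at 0x7408 + desc * 0x10: the enable bit sits at bit 31, the 10-bit integer part at bits 25:16, and the 14-bit fraction at bits 13:0. A sketch packing a complete register value:

#include <stdint.h>

/* compose one desc{r} rate register from its three bitfields */
static uint32_t tps_desc_rate_reg(uint32_t en, uint32_t rate_x,
				  uint32_t rate_y)
{
	uint32_t reg = 0;

	reg |= (en & 0x1U) << 31;	/* desc{r}_rate_en */
	reg |= (rate_x & 0x3FFU) << 16;	/* desc{r}_rate_x, 10 bits */
	reg |= rate_y & 0x3FFFU;	/* desc{r}_rate_y, 14 bits */

	return reg;
}
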
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
* port="pif_tps_desc_rate_ta_rst_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 354705f9bc49..73c0f41df8d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -53,7 +54,6 @@ enum mcp_area {
MCP_AREA_SETTINGS = 0x20000000,
};
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
@@ -67,14 +67,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
- err = hw_atl_utils_soft_reset(self);
- if (err)
- return err;
-
hw_atl_utils_hw_chip_features_init(self,
&self->chip_features);
- hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+ self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
self->fw_ver_actual) == 0) {
@@ -313,7 +309,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
for (++cnt; --cnt && !err;) {
aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
self, val, val != a,
1U, 1000U);
@@ -409,7 +405,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_B1))
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
else
err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
@@ -438,7 +434,7 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
p, cnt, MCP_AREA_SETTINGS);
}
-static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
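
hw_atl_utils_ver_match() gains external linkage here, but its body falls outside the hunk. Judging by the two masks, the top byte is a major version that must match exactly while the low three bytes are a minor version that must be at least the expected value; a hedged sketch of that reading (the in-tree body may differ in structure and error codes):

#include <linux/errno.h>
#include <linux/types.h>

static int ver_match_sketch(u32 ver_expected, u32 ver_actual)
{
	const u32 dw_major_mask = 0xff000000U;
	const u32 dw_minor_mask = 0x00ffffffU;

	/* major version must match exactly */
	if ((ver_expected ^ ver_actual) & dw_major_mask)
		return -EOPNOTSUPP;

	/* actual minor version must be at least the expected one */
	if ((ver_expected & dw_minor_mask) > (ver_actual & dw_minor_mask))
		return -EOPNOTSUPP;

	return 0;
}
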
@@ -501,7 +497,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
int err = 0;
- if (!IS_CHIP_FEATURE(MIPS)) {
+ if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
err = -1;
goto err_exit;
}
@@ -607,7 +603,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
- if (IS_CHIP_FEATURE(REVISION_A0)) {
+ if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
@@ -692,7 +688,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
link_status->mbps = 5000U;
break;
- case HAL_ATLANTIC_RATE_2GS:
+ case HAL_ATLANTIC_RATE_2G5:
link_status->mbps = 2500U;
break;
@@ -806,22 +802,24 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
u32 mif_rev = val & 0xFFU;
u32 chip_features = 0U;
+ chip_features |= ATL_HW_CHIP_ATLANTIC;
+
if ((0xFU & mif_rev) == 1U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ chip_features |= ATL_HW_CHIP_REVISION_A0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS;
} else if ((0xFU & mif_rev) == 2U) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B0 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
} else if ((0xFU & mif_rev) == 0xAU) {
- chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
- HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
- HAL_ATLANTIC_UTILS_CHIP_MIPS |
- HAL_ATLANTIC_UTILS_CHIP_TPO2 |
- HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ chip_features |= ATL_HW_CHIP_REVISION_B1 |
+ ATL_HW_CHIP_MPI_AQ |
+ ATL_HW_CHIP_MIPS |
+ ATL_HW_CHIP_TPO2 |
+ ATL_HW_CHIP_RPF2;
}
*p = chip_features;
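
The renamed ATL_HW_CHIP_* flags pair with the new ATL_HW_IS_CHIP_FEATURE() macro, which takes the hardware handle explicitly instead of capturing a local `self` the way the removed IS_CHIP_FEATURE() did. Its definition lives in aq_hw.h, outside this diff; a likely shape:

/* assumed definition; only the explicit handle is certain from the usage */
#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) \
	(!!(ATL_HW_CHIP_##_F_ & (_HW_)->chip_features))
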
@@ -919,11 +917,9 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
return 0;
}
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
{
- *fw_version = aq_hw_read_reg(self, 0x18U);
-
- return 0;
+ return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b15513914636..0b4b54fc1de0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -360,6 +361,8 @@ struct aq_rx_filter_vlan {
u8 queue;
};
+#define HW_ATL_VLAN_MAX_FILTERS 16U
+
struct aq_rx_filter_l2 {
s8 queue;
u8 location;
@@ -406,17 +409,6 @@ enum hw_atl_rx_ctrl_registers_l3l4 {
#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
((location) - AQ_RX_FIRST_LOC_FL3L4)
-#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
-#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
-#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
-#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
-#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
-
-#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- self->chip_features)
-
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
MPI_RESET = 1,
@@ -427,7 +419,7 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_10G BIT(0)
#define HAL_ATLANTIC_RATE_5G BIT(1)
#define HAL_ATLANTIC_RATE_5GSR BIT(2)
-#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_2G5 BIT(3)
#define HAL_ATLANTIC_RATE_1G BIT(4)
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
@@ -622,7 +614,7 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
-int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
@@ -643,6 +635,8 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc);
+int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
extern const struct aq_fw_ops aq_fw_1x_ops;
extern const struct aq_fw_ops aq_fw_2x_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 1ad10cc14918..eeedd8c90067 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -134,7 +135,7 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5GSR)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_2GS)
+ if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
if (speed & AQ_NIC_RATE_1G)
@@ -155,7 +156,7 @@ static u32 fw2x_to_eee_mask(u32 speed)
if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
rate |= AQ_NIC_RATE_EEE_5G;
if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
- rate |= AQ_NIC_RATE_EEE_2GS;
+ rate |= AQ_NIC_RATE_EEE_2G5;
if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
rate |= AQ_NIC_RATE_EEE_1G;
@@ -170,7 +171,7 @@ static u32 eee_mask_to_fw2x(u32 speed)
rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
if (speed & AQ_NIC_RATE_EEE_5G)
rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
- if (speed & AQ_NIC_RATE_EEE_2GS)
+ if (speed & AQ_NIC_RATE_EEE_2G5)
rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
@@ -282,8 +283,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
u32 mac_addr[2] = { 0 };
int err = 0;
- u32 h = 0U;
- u32 l = 0U;
if (efuse_addr != 0) {
err = hw_atl_utils_fw_downld_dwords(self,
@@ -298,26 +297,6 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
ether_addr_copy(mac, (u8 *)mac_addr);
- if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
- unsigned int rnd = 0;
-
- get_random_bytes(&rnd, sizeof(unsigned int));
-
- l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
- h = 0x8001300EU;
-
- mac[5] = (u8)(0xFFU & l);
- l >>= 8;
- mac[4] = (u8)(0xFFU & l);
- l >>= 8;
- mac[3] = (u8)(0xFFU & l);
- l >>= 8;
- mac[2] = (u8)(0xFFU & l);
- mac[1] = (u8)(0xFFU & h);
- h >>= 8;
- mac[0] = (u8)(0xFFU & h);
- }
-
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
new file mode 100644
index 000000000000..8df9d4ef36f0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -0,0 +1,841 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "hw_atl/hw_atl_b0.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+#include "hw_atl2_llh_internal.h"
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+#define DEFAULT_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 8U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL2_RSS_MAX, \
+ .tcs_max = HW_ATL2_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL2_RXD_SIZE, \
+ .rxds_max = HW_ATL2_MAX_RXD, \
+ .rxds_min = HW_ATL2_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL2_TXD_SIZE, \
+ .txds_max = HW_ATL2_MAX_TXD, \
+ .txds_min = HW_ATL2_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL2_TX_RINGS, \
+ .rx_rings = HW_ATL2_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_GSO_UDP_L4 | \
+ NETIF_F_GSO_PARTIAL | \
+ NETIF_F_HW_TC, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL2_MTU_JUMBO, \
+ .mac_regs_count = 72, \
+ .hw_alive_check_addr = 0x10U, \
+ .priv_data_len = sizeof(struct hw_atl2_priv)
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
+}
+
+static int hw_atl2_hw_reset(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ int err;
+
+ err = hw_atl2_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ memset(priv, 0, sizeof(*priv));
+
+ self->aq_fw_ops->set_state(self, MPI_RESET);
+
+ err = aq_hw_err_from_flags(self);
+
+ return err;
+}
+
+static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ unsigned int tcs, q_per_tc;
+ unsigned int tc, q;
+ u32 rx_map = 0;
+ u32 tx_map = 0;
+
+ hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U);
+
+ switch (cfg->tc_mode) {
+ case AQ_TC_MODE_8TCS:
+ tcs = 8;
+ q_per_tc = 4;
+ break;
+ case AQ_TC_MODE_4TCS:
+ tcs = 4;
+ q_per_tc = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (tc = 0; tc != tcs; tc++) {
+ unsigned int tc_q_offset = tc * q_per_tc;
+
+ for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
+ rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_RX_Q_TC_MAP_ADR(q),
+ rx_map);
+ rx_map = 0;
+ }
+
+ tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
+ if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
+ HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
+ aq_hw_write_reg(self,
+ HW_ATL2_TX_Q_TC_MAP_ADR(q),
+ tx_map);
+ tx_map = 0;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
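
The mapping itself is uniform: queue q belongs to TC q / q_per_tc, and the inner loop only batches writes so that each 32-bit map register is written once, when the next queue would land in a different register. A standalone sketch of the resulting assignment for AQ_TC_MODE_4TCS:

#include <stdio.h>

int main(void)
{
	unsigned int q_per_tc = 8;	/* AQ_TC_MODE_4TCS: 4 TCs, 8 queues each */
	unsigned int q;

	for (q = 0; q < 32; q++)
		printf("queue %2u -> tc %u\n", q, q / q_per_tc);
	return 0;
}
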
+static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
+ u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
+ unsigned int prio = 0U;
+ u32 tc = 0U;
+
+ /* TPS Descriptor rate init */
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ tx_buff_size /= cfg->tcs;
+ rx_buff_size /= cfg->tcs;
+ for (tc = 0; tc < cfg->tcs; tc++) {
+ u32 threshold = 0U;
+
+ /* Tx buf size per TC */
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+ /* QoS Rx buf size per TC */
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
+
+ threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+ }
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (prio = 0; prio < 8; ++prio)
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
+ cfg->prio_tc_map[prio]);
+
+ /* ATL2 Apply ring to TC mapping */
+ hw_atl2_hw_queue_to_tc_map_set(self);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ const u32 num_tcs = aq_hw_num_tcs(self);
+ u32 rpf_redir2_enable;
+ int tc;
+ int i;
+
+ rpf_redir2_enable = num_tcs > 4 ? 1 : 0;
+
+ hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable);
+
+ for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) {
+ for (tc = 0; tc != num_tcs; tc++) {
+ hw_atl2_new_rpf_rss_redir_set(self, tc, i,
+ tc *
+ aq_hw_q_per_tc(self) +
+ indirection_table[i]);
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
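
With per-TC RSS, each indirection entry is rebased into the owning TC's queue range, so the programmed ring is tc * q_per_tc + indirection_table[i]; redirection table 2 is selected only when more than four TCs are in play. A one-liner sketch of the ring computation:

/* rebased RSS ring: each TC indexes into its own block of queues */
static unsigned int rss_ring(unsigned int tc, unsigned int q_per_tc,
			     unsigned int table_entry)
{
	return tc * q_per_tc + table_entry;	/* e.g. tc 1, entry 3 -> ring 11 */
}
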
+static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
+{
+ static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
+ /* Scale factor is based on the number of bits in fractional portion */
+ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
+ static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
+ HW_ATL_TPS_DESC_RATE_Y_SHIFT;
+ const u32 link_speed = self->aq_link_status.mbps;
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+ unsigned long num_min_rated_tcs = 0;
+ u32 tc_weight[AQ_CFG_TCS_MAX];
+ u32 fixed_max_credit_4b;
+ u32 fixed_max_credit;
+ u8 min_rate_msk = 0;
+ u32 sum_weight = 0;
+ int tc;
+
+ /* By default max_credit is based upon MTU (in unit of 64b) */
+ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
+ /* in unit of 4b */
+ fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4;
+
+ if (link_speed) {
+ min_rate_msk = nic_cfg->tc_min_rate_msk &
+ (BIT(nic_cfg->tcs) - 1);
+ num_min_rated_tcs = hweight8(min_rate_msk);
+ }
+
+ /* First, calculate weights where min_rate is specified */
+ if (num_min_rated_tcs) {
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ if (!nic_cfg->tc_min_rate[tc]) {
+ tc_weight[tc] = 0;
+ continue;
+ }
+
+ tc_weight[tc] = (-1L + link_speed +
+ nic_cfg->tc_min_rate[tc] *
+ max_weight) /
+ link_speed;
+ tc_weight[tc] = min(tc_weight[tc], max_weight);
+ sum_weight += tc_weight[tc];
+ }
+ }
+
+ /* WSP, if min_rate is set for at least one TC.
+ * RR otherwise.
+ */
+ hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ /* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
+ * leave Descriptor TC Arbiter as RR.
+ */
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+
+ hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
+
+ for (tc = 0; tc != nic_cfg->tcs; tc++) {
+ const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+ u32 weight, max_credit;
+
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
+ fixed_max_credit);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
+
+ if (num_min_rated_tcs) {
+ weight = tc_weight[tc];
+
+ if (!weight && sum_weight < max_weight)
+ weight = (max_weight - sum_weight) /
+ (nic_cfg->tcs - num_min_rated_tcs);
+ else if (!weight)
+ weight = 0x640;
+
+ max_credit = max(2 * weight, fixed_max_credit_4b);
+ } else {
+ weight = 0x640;
+ max_credit = 0xFFF0;
+ }
+
+ hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
+ hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
+ max_credit);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
+
+ if (en) {
+ /* Nominal rate is always 10G */
+ const u32 rate = 10000U * scale /
+ nic_cfg->tc_max_rate[tc];
+ const u32 rate_int = rate >>
+ HW_ATL_TPS_DESC_RATE_Y_WIDTH;
+ const u32 rate_frac = rate & frac_msk;
+
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
+ } else {
+ /* A value of 1 indicates the queue is not
+ * rate controlled.
+ */
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+ }
+ for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
+ const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
+
+ hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
+ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
+ hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
+
+ /* Tx TC/RSS number config */
+ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
+
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
+
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ u16 action;
+ u8 index;
+ int i;
+
+ /* Action Resolver Table (ART) is used by RPF to decide which action
+ * to take with a packet based upon input tag and tag mask, where:
+ * - input tag is a combination of the 3-bit VLAN priority (PCP) and
+ * a 29-bit concatenation of all tags from the filter block;
+ * - tag mask is a mask used for matching against the input tag.
+ * The input tag is compared with all the Requested_tags in the
+ * Record table to find a match; the Action field of the matched
+ * REC entry is used for further processing. If multiple entries
+ * match, the lowest-numbered REC entry's Action field is selected.
+ */
+ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
+ hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
+ HW_ATL2_MAC_UC);
+ hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);
+
+ /* FW reserves the beginning of ART, thus all driver entries must
+ * start from the offset specified in FW caps.
+ */
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK,
+ HW_ATL2_ACTION_DROP);
+
+ /* Configure the ART to map a given VLAN priority (PCP) to the TC index
+ * used by the RSS redirection table.
+ */
+ for (i = 0; i < 8; i++) {
+ action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]);
+
+ index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ i << HW_ATL2_RPF_TAG_PCP_OFFSET,
+ HW_ATL2_RPF_TAG_PCP_MASK, action);
+ }
+}
+
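
Restating the comment at the top of hw_atl2_hw_init_new_rx_filters(): a record matches when the packet's input tag agrees with the record's requested tag on every bit selected by the record's mask, and the lowest-numbered matching record supplies the action. A small sketch of that resolution, with hypothetical record and table types:

#include <stddef.h>
#include <stdint.h>

struct art_rec {
	uint32_t tag;		/* requested tag */
	uint32_t mask;		/* bits that must match */
	uint32_t action;
};

/* the first (lowest-index) record whose masked tag matches wins */
static uint32_t art_resolve(const struct art_rec *rec, size_t n,
			    uint32_t input_tag, uint32_t miss_action)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (((input_tag ^ rec[i].tag) & rec[i].mask) == 0)
			return rec[i].action;

	return miss_action;
}
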
+static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
+ bool promisc)
+{
+ u16 off_action = (!promisc &&
+ !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
+ HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_VLAN_MASK |
+ HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
+}
+
+static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
+{
+ u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ bool vlan_promisc_enable;
+ u8 index;
+
+ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
+ hw_atl2_act_rslvr_table_set(self, index, 0,
+ HW_ATL2_RPF_TAG_UC_MASK |
+ HW_ATL2_RPF_TAG_ALLMC_MASK,
+ off_action);
+
+ /* update VLAN promisc mode too */
+ vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
+ vlan_promisc_enable);
+}
+
+static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ u32 val;
+ int err;
+
+ err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
+ self, val, val == 1,
+ 1, 10000U);
+ if (err)
+ return err;
+
+ hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
+ action);
+
+ hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);
+
+ return err;
+}
+
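
The readx_poll_timeout_atomic() call above busy-polls hw_atl2_sem_act_rslvr_get() until the semaphore reads 1 (acquired) or 10 ms elapse, re-reading every microsecond; writing 1 back via hw_atl_reg_glb_cpu_sem_set() then releases it. Roughly equivalent open-coded logic:

#include <linux/delay.h>
#include <linux/errno.h>

/* open-coded equivalent of the poll in hw_atl2_act_rslvr_table_set() */
static int acquire_act_rslvr_sem(struct aq_hw_s *self)
{
	unsigned int spent_us;
	u32 val;

	for (spent_us = 0; spent_us < 10000U; spent_us++) {
		val = hw_atl2_sem_act_rslvr_get(self);
		if (val == 1)
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}
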
+static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
+
+ /* Rx flow control */
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);
+
+ /* RSS Ring selection */
+ hw_atl_b0_hw_init_rx_rss_ctrl1(self);
+
+ /* Multicast filters */
+ for (i = HW_ATL2_MAC_MAX; i--;) {
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);
+
+ /* Vlan filters */
+ hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
+ hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
+
+ hw_atl2_hw_init_new_rx_filters(self);
+
+ /* Rx Interrupts */
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+{
+ static u32 aq_hw_atl2_igcr_table_[4][2] = {
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
+ };
+
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ u8 base_index, count;
+ int err;
+
+ err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
+ &count);
+ if (err)
+ return err;
+
+ priv->art_base_index = 8 * base_index;
+
+ hw_atl2_init_launchtime(self);
+
+ hw_atl2_hw_init_tx_path(self);
+ hw_atl2_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
+
+ hw_atl2_hw_qos_set(self);
+ hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ hw_atl2_rpf_new_enable_set(self, 1);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL2_ERR_INT << 0x18) |
+ (1U << 0x1F)) |
+ ((HW_ATL2_ERR_INT << 0x10) |
+ (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
+}
+
+static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));
+
+ return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int err = 0;
+
+ if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (cfg->mc_list_count = 0U;
+ cfg->mc_list_count < count;
+ ++cfg->mc_list_count) {
+ u32 i = cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
+ HW_ATL2_MAC_MIN + i);
+
+ hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);
+
+ hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
+ HW_ATL2_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
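
The h/l split in the multicast loop packs a 6-byte MAC big-endian style: the two leading bytes go to the MSW filter register, the remaining four to the LSW one. A worked example with a made-up, locally administered address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t h = (mac[0] << 8) | mac[1];
	uint32_t l = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
		     (mac[4] << 8) | mac[5];

	printf("msw=0x%04x lsw=0x%08x\n", h, l);
	/* prints: msw=0x0211 lsw=0x22334455 */
	return 0;
}
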
+static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
+{
+ unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
+
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
+
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
+
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
+
+ tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ tx_max_timer);
+ tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ tx_min_timer);
+ rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
+ rx_max_timer);
+ rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
+ rx_min_timer);
+
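+			/* The per-queue ITR control word packs the min
+			 * timer into bits [15:8] and the max timer into
+			 * bits [23:16]; the initial value 2U presumably
+			 * keeps the moderation enable bit set.
+			 */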
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl2_timers_table_tx_[][2] = {
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
+ };
+			static unsigned int hw_atl2_timers_table_rx_[][2] = {
+				{0x6U, 0x38U},  /* 10Gbit */
+				{0xCU, 0x70U},  /* 5Gbit */
+				{0xCU, 0x70U},  /* 5Gbit 5GS */
+				{0x18U, 0xE0U}, /* 2.5Gbit */
+				{0x30U, 0x80U}, /* 1Gbit */
+				{0x4U, 0x50U},  /* 100Mbit */
+			};
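+			/* Each table row holds a {min, max} timer pair,
+			 * selected below by the link speed index.
+			 */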
+ unsigned int mbps = self->aq_link_status.mbps;
+ unsigned int speed_index;
+
+ speed_index = hw_atl_utils_mbps_2_speed_index(mbps);
+
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl2_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl2_timers_table_rx_
+ [speed_index][1] << 0x10U;
+ }
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
+ }
+
+ for (i = HW_ATL2_RINGS_MAX; i--;) {
+ hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
+
+ return 0;
+}
+
+static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
+{
+ return &self->curr_stats;
+}
+
+static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ u32 queue;
+ u8 index;
+ int i;
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);
+
+ for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
+ queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);
+
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index, 0, 0,
+ HW_ATL2_ACTION_DISABLE);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id, i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+
+ hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);
+
+ index = priv->art_base_index +
+ HW_ATL2_RPF_VLAN_USER_INDEX + i;
+ hw_atl2_act_rslvr_table_set(self, index,
+ (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
+ HW_ATL2_RPF_TAG_VLAN_MASK, queue);
+ } else {
+ hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+	/* set promisc mode when disabling the VLAN filter */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+ hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
+const struct aq_hw_ops hw_atl2_ops = {
+ .hw_soft_reset = hw_atl2_utils_soft_reset,
+ .hw_prepare = hw_atl2_utils_initfw,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_init = hw_atl2_hw_init,
+ .hw_reset = hw_atl2_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl2_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl2_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl2_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl2_hw_packet_filter_set,
+ .hw_filter_vlan_set = hw_atl2_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl,
+ .hw_multicast_list_set = hw_atl2_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl2_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit,
+ .hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl2_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
new file mode 100644
index 000000000000..de8723f1c28a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_H
+#define HW_ATL2_H
+
+#include "aq_common.h"
+
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_ops hw_atl2_ops;
+
+#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
new file mode 100644
index 000000000000..5a89bb8722f9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_INTERNAL_H
+#define HW_ATL2_INTERNAL_H
+
+#include "hw_atl2_utils.h"
+
+#define HW_ATL2_MTU_JUMBO 16352U
+#define HW_ATL2_MTU 1514U
+
+#define HW_ATL2_TX_RINGS 4U
+#define HW_ATL2_RX_RINGS 4U
+
+#define HW_ATL2_RINGS_MAX 32U
+#define HW_ATL2_TXD_SIZE (16U)
+#define HW_ATL2_RXD_SIZE (16U)
+
+#define HW_ATL2_MAC_UC 0U
+#define HW_ATL2_MAC_MIN 1U
+#define HW_ATL2_MAC_MAX 38U
+
+/* interrupts */
+#define HW_ATL2_ERR_INT 8U
+#define HW_ATL2_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL2_TXBUF_MAX 128U
+#define HW_ATL2_RXBUF_MAX 192U
+
+#define HW_ATL2_RSS_REDIRECTION_MAX 64U
+
+#define HW_ATL2_TC_MAX 8U
+#define HW_ATL2_RSS_MAX 8U
+
+#define HW_ATL2_INTR_MODER_MAX 0x1FF
+#define HW_ATL2_INTR_MODER_MIN 0xFF
+
+#define HW_ATL2_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL2_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL2_MAX_RXD 8184U
+#define HW_ATL2_MAX_TXD 8184U
+
+#define HW_ATL2_FW_SM_ACT_RSLVR 0x3U
+
+#define HW_ATL2_RPF_TAG_UC_OFFSET 0x0
+#define HW_ATL2_RPF_TAG_ALLMC_OFFSET 0x6
+#define HW_ATL2_RPF_TAG_ET_OFFSET 0x7
+#define HW_ATL2_RPF_TAG_VLAN_OFFSET 0xA
+#define HW_ATL2_RPF_TAG_UNTAG_OFFSET 0xE
+#define HW_ATL2_RPF_TAG_L3_V4_OFFSET 0xF
+#define HW_ATL2_RPF_TAG_L3_V6_OFFSET 0x12
+#define HW_ATL2_RPF_TAG_L4_OFFSET 0x15
+#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18
+#define HW_ATL2_RPF_TAG_FLEX_OFFSET 0x1B
+#define HW_ATL2_RPF_TAG_PCP_OFFSET 0x1D
+
+#define HW_ATL2_RPF_TAG_UC_MASK (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_VLAN_MASK (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET)
+#define HW_ATL2_RPF_TAG_ET_MASK (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET)
+#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET)
+#define HW_ATL2_RPF_TAG_L4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET)
+#define HW_ATL2_RPF_TAG_PCP_MASK (0x00000007 << HW_ATL2_RPF_TAG_PCP_OFFSET)
+
+#define HW_ATL2_RPF_TAG_BASE_UC BIT(HW_ATL2_RPF_TAG_UC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET)
+#define HW_ATL2_RPF_TAG_BASE_VLAN BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET)
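+
+/* As a worked example of the tag scheme: the VLAN filter code tags
+ * matching frames with (filter index + 2), so a resolver record that
+ * matches ((i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET) under
+ * HW_ATL2_RPF_TAG_VLAN_MASK steers traffic for VLAN filter i.
+ */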
+
+enum HW_ATL2_RPF_ART_INDEX {
+ HW_ATL2_RPF_L2_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX,
+ HW_ATL2_RPF_L3L4_USER_INDEX = 8,
+ HW_ATL2_RPF_ET_PCP_USER_INDEX = HW_ATL2_RPF_L3L4_USER_INDEX + 16,
+ HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
+ HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
+ HW_ATL_VLAN_MAX_FILTERS,
+};
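+
+/* Records are addressed relative to the block granted by firmware:
+ * the resolver slot for a given filter is art_base_index plus one of
+ * the offsets above (e.g. art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX
+ * + i for VLAN filter i).
+ */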
+
+#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
+ ((((ACTION) & 0x3U) << 8) | \
+ (((RSS) & 0x1U) << 7) | \
+ (((INDEX) & 0x3FU) << 2) | \
+ (((VALID) & 0x1U) << 0))
+
+#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1)
+#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0)
+#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)
+#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1)
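+
+/* For illustration: HW_ATL2_ACTION_ASSIGN_QUEUE(3) expands to
+ * (1 << 8) | (3 << 2) | 1 = 0x10d, i.e. action "assign", RSS off,
+ * index 3, valid bit set.
+ */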
+
+enum HW_ATL2_RPF_RSS_HASH_TYPE {
+ HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0,
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7),
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8),
+ HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP |
+ HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
+};
+
+#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
+
+struct hw_atl2_priv {
+ struct statistics_s last_stats;
+ unsigned int art_base_index;
+};
+
+#endif /* HW_ATL2_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
new file mode 100644
index 000000000000..cd954b11d24a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+#include "aq_hw_utils.h"
+
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK,
+ HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select);
+}
+
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK,
+ HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT,
+ rss_hash_type);
+}
+
+/* rpf */
+
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR,
+ HW_ATL2_RPF_NEW_EN_MSK,
+ HW_ATL2_RPF_NEW_EN_SHIFT,
+ enable);
+}
+
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter),
+ HW_ATL2_RPFL2UC_TAG_MSK,
+ HW_ATL2_RPFL2UC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR,
+ HW_ATL2_RPF_L2_BC_TAG_MSK,
+ HW_ATL2_RPF_L2_BC_TAG_SHIFT,
+ tag);
+}
+
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index),
+ HW_ATL2_RPF_RSS_REDIR_MSK(tc),
+ HW_ATL2_RPF_RSS_REDIR_SHIFT(tc),
+ queue);
+}
+
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter),
+ HW_ATL2_RPF_VL_TAG_MSK,
+ HW_ATL2_RPF_VL_TAG_SHIFT,
+ tag);
+}
+
+/* TX */
+
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK,
+ HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT,
+ tc_q_rand_map_en);
+}
+
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK,
+ HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT,
+ clk_gate_en);
+}
+
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue),
+ tx_intr_moderation_ctl);
+}
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT,
+ data_arb_mode);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
+}
+
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
+ weight);
+}
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR);
+}
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw)
+{
+ u32 hw_ver = hw_atl2_get_hw_version(aq_hw);
+
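+	/* Pick the launch time clock ratio from the FPGA bitstream
+	 * version: full speed before 1.0.0.0, half speed from 1.0.85.2
+	 * onwards, and quarter speed in between.
+	 */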
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR,
+ HW_ATL2_LT_CTRL_CLK_RATIO_MSK,
+ HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT,
+ hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED :
+ hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) ?
+ HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED :
+ HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED);
+}
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location),
+ tag);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location),
+ mask);
+ aq_hw_write_reg(aq_hw,
+ HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location),
+ action);
+}
+
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR,
+ HW_ATL2_RPF_REC_TAB_EN_MSK,
+ HW_ATL2_RPF_REC_TAB_EN_SHIFT,
+ sections);
+}
+
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i));
+}
+
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ aq_hw_write_reg(aq_hw, HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i),
+ data[j]);
+}
+
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len)
+{
+ int j = 0;
+ int i;
+
+ for (i = offset; i < offset + len; i++, j++)
+ data[j] = aq_hw_read_reg(aq_hw,
+ HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(i));
+}
+
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK,
+ HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT,
+ finish);
+}
+
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR,
+ HW_ATL2_MIF_MCP_FINISHED_READ_MSK,
+ HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT);
+}
+
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR);
+}
+
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val);
+}
+
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR);
+}
+
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR,
+ val);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
new file mode 100644
index 000000000000..98c7a4621297
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_H
+#define HW_ATL2_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* Set TX Interrupt Moderation Control Register */
+void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* Set Redirection Table 2 Select */
+void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
+ u32 select);
+
+/* Set RSS hash type */
+void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
+
+/* set new RPF enable */
+void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set l2 unicast filter tag */
+void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set l2 broadcast filter tag */
+void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag);
+
+/* set new rss redirection table */
+void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
+ u32 queue);
+
+/* Set VLAN filter tag */
+void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
+
+/* set tx random TC-queue mapping enable bit */
+void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
+ const u32 tc_q_rand_map_en);
+
+/* set tx buffer clock gate enable */
+void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
+
+void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ const u32 data_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 max_credit);
+
+/* set tx packet scheduler tc data weight */
+void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ const u32 tc,
+ const u32 weight);
+
+u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
+
+void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw);
+
+/* set action resolver record */
+void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 tag, u32 mask, u32 action);
+
+/* set enable action resolver section */
+void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections);
+
+/* get data from firmware shared input buffer */
+void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set data into firmware shared input buffer */
+void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* get data from firmware shared output buffer */
+void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
+ int len);
+
+/* set host finished write shared buffer indication */
+void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish);
+
+/* get mcp finished read shared buffer indication */
+u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw);
+
+/* get mcp boot register */
+u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw);
+
+/* set mcp boot register */
+void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get host interrupt request */
+u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw);
+
+/* clear host interrupt request */
+void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val);
+
+#endif /* HW_ATL2_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
new file mode 100644
index 000000000000..e34c5cda061e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_LLH_INTERNAL_H
+#define HW_ATL2_LLH_INTERNAL_H
+
+/* RX pif_rpf_redir_2_en_i Bitfield Definitions
+ * PORT="pif_rpf_redir_2_en_i"
+ */
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1
+#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0
+
+/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
+ */
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0
+#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9
+
+/* rx rpf_new_rpf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_new_rpf_en_i".
+ * port="pif_rpf_new_rpf_en_i
+ */
+
+/* register address for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104
+/* bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800
+/* inverted bitmask for bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff
+/* lower bit position of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_SHIFT 11
+/* width of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_WIDTH 1
+/* default value of bitfield rpf_new_rpf_en */
+#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0
+
+/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[5:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_req_tag0[5:0]"
+ */
+
+/* register address for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000
+/* inverted bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF
+/* lower bit position of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_SHIFT 22
+/* width of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_WIDTH 6
+/* default value of bitfield l2_uc_req_tag0{f}[5:0] */
+#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0
+
+/* rpf_l2_bc_req_tag[5:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]".
+ * port="pifrpf_l2_bc_req_tag_i[5:0]"
+ */
+
+/* register address for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0
+/* bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F
+/* inverted bitmask for bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0
+/* lower bit position of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0
+/* width of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6
+/* default value of bitfield rpf_l2_bc_req_tag */
+#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0
+
+/* rx rpf_rss_red1_data_[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]".
+ * port="pif_rpf_rss_red1_data_i[4:0]"
+ */
+
+/* register address for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \
+ (0x100 * !!((TC) > 3)) + (INDEX) * 4)
+/* bitmask for bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x0000001F << (5 * ((TC) % 4)))
+/* lower bit position of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4))
+/* width of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5
+/* default value of bitfield rpf_rss_red1_data[4:0] */
+#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0
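+
+/* For illustration: the entry for TC 5, index 2 lives at register
+ * 0x6308 (0x6200 + 0x100 + 2 * 4) in bits [9:5], since four 5-bit
+ * entries are packed per register and TCs 4..7 use the second bank.
+ */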
+
+/* rx vlan_req_tag0{f}[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_vlan_req_tag0[3:0]"
+ */
+
+/* register address for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000
+/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF
+/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_SHIFT 12
+/* width of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_WIDTH 4
+/* default value of bitfield vlan_req_tag0{f}[3:0] */
+#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0
+
+/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]".
+ * Parameter: Queue {Q} | bit-level stride | range [0, 31]
+ * PORT="pif_rx_q0_tc_map_i[2:0]"
+ */
+
+/* Register address for bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
+/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 4) % 32 : 0)
+/* Width of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3
+/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
+#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx tx_tc_q_rand_map_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_tc_q_rand_map_en".
+ * port="pif_tpb_tx_tc_q_rand_map_en_i"
+ */
+
+/* register address for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900
+/* bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200
+/* inverted bitmask for bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF
+/* lower bit position of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9
+/* width of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1
+/* default value of bitfield tx_tc_q_rand_map_en */
+#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0
+
+/* tx tx_buffer_clk_gate_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
+ * port="pif_tpb_tx_buffer_clk_gate_en_i"
+ */
+
+/* register address for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900
+/* bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020
+/* inverted bitmask for bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf
+/* lower bit position of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5
+/* width of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1
+/* default value of bitfield tx_buffer_clk_gate_en */
+#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
+
+/* tx tx_q_tc_map{q} bitfield definitions
+ * preprocessor definitions for the bitfield "tx_q_tc_map{q}".
+ * parameter: queue {q} | bit-level stride | range [0, 31]
+ * port="pif_tpb_tx_q_tc_map0_i[2:0]"
+ */
+
+/* register address for bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
+ (((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0)
+/* lower bit position of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \
+ (((queue) < 32) ? ((queue) * 8) % 32 : 0)
+/* width of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3
+/* default value of bitfield tx_q_tc_map{q} */
+#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc
+/* lower bit position of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0
+/* width of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2
+/* default value of bitfield data_tc_arb_mode */
+#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
+
+/* tx data_tc{t}_credit_max[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[15:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
+/* width of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16
+/* default value of bitfield data_tc{t}_credit_max[f:0] */
+#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
+
+/* tx data_tc{t}_weight[e:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[14:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff
+/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000
+/* lower bit position of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
+/* width of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15
+/* default value of bitfield data_tc{t}_weight[e:0] */
+#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00007c28
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40)
+
+/* Launch time control register */
+#define HW_ATL2_LT_CTRL_ADR 0x00007a1c
+
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000
+#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16
+
+#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00
+#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8
+#define HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED 4
+#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2
+#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1
+
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008
+#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3
+
+#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007
+#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0
+
+/* FPGA VER register */
+#define HW_ATL2_FPGA_VER_ADR 0x000000f4
+#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
+ ((((mj) & 0xff) << 24) | \
+ (((mi) & 0xff) << 16) | \
+ (((bl) & 0xff) << 8) | \
+ (((rv) & 0xff) << 0))
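+/* For example, HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) packs to 0x01005502. */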
+
+/* ahb_mem_addr{f}[31:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]".
+ * Parameter: filter {f} | stride size 0x10 | range [0, 127]
+ * PORT="ahb_mem_addr{f}[31:0]"
+ */
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \
+ (0x00014000u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 32
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \
+ (0x00014004u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 32
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0
+
+/* Register address for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \
+ (0x00014008u + (filter) * 0x10)
+/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu
+/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u
+/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0
+/* Width of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 11
+/* Default value of bitfield ahb_mem_addr{f}[31:0] */
+#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0
+
+/* rpf_rec_tab_en[15:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]".
+ * PORT="pif_rpf_rec_tab_en[15:0]"
+ */
+/* Register address for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u
+/* Bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0
+/* Width of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16
+/* Default value of bitfield rpf_rec_tab_en[15:0] */
+#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0
+
+/* Register address for firmware shared input buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U)
+/* Register address for firmware shared output buffer */
+#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U)
+
+/* pif_host_finished_buf_wr_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i".
+ * PORT="pif_host_finished_buf_wr_i"
+ */
+/* Register address for bitfield rpif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u
+/* Bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0
+/* Width of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1
+/* Default value of bitfield pif_host_finished_buf_wr_i */
+#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0
+
+/* pif_mcp_finished_buf_rd_i Bitfield Definitions
+ * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i".
+ * PORT="pif_mcp_finished_buf_rd_i"
+ */
+/* Register address for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u
+/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u
+/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu
+/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0
+/* Width of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1
+/* Default value of bitfield pif_mcp_finished_buf_rd_i */
+#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0
+
+/* Register address for bitfield pif_mcp_boot_reg */
+#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u
+
+#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0)
+
+#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u
+#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u
+#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u
+
+#endif /* HW_ATL2_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
new file mode 100644
index 000000000000..f3766780e975
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+
+#define HW_ATL2_FW_VER_1X 0x01000000U
+
+#define AQ_A2_BOOT_STARTED BIT(0x18)
+#define AQ_A2_CRASH_INIT BIT(0x1B)
+#define AQ_A2_BOOT_CODE_FAILED BIT(0x1C)
+#define AQ_A2_FW_INIT_FAILED BIT(0x1D)
+#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)
+
+#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
+ AQ_A2_BOOT_CODE_FAILED | \
+ AQ_A2_FW_INIT_FAILED)
+#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
+ AQ_A2_FW_INIT_COMP_SUCCESS)
+
+#define AQ_A2_FW_BOOT_REQ_REBOOT BIT(0x0)
+#define AQ_A2_FW_BOOT_REQ_HOST_BOOT BIT(0x8)
+#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
+#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err;
+
+ self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_a2_fw_ops;
+ } else {
+		aq_pr_err("Bad FW version detected: %x, continuing anyway\n",
+			  self->fw_ver_actual);
+		*fw_ops = &aq_a2_fw_ops;
+	}
+	aq_pr_trace("Detected ATL2FW %x\n", self->fw_ver_actual);
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ self->chip_features |= ATL_HW_CHIP_ANTIGUA;
+
+ return err;
+}
+
+static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
+{
+ u32 rbl_status;
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+ if (rbl_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
+ return true;
+
+ /* Host boot requested */
+ if (hw_atl2_mif_host_req_int_get(self) & HW_ATL2_MCP_HOST_REQ_INT_READY)
+ return true;
+
+ return false;
+}
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
+{
+ bool rbl_complete = false;
+ u32 rbl_status = 0;
+ u32 rbl_request;
+ int err;
+
+ hw_atl2_mif_host_req_int_clr(self, 0x01);
+ rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
+#ifdef AQ_CFG_FAST_START
+ rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
+#endif
+ hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);
+
+ /* Wait for RBL boot */
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
+ rbl_status,
+ ((rbl_status & AQ_A2_BOOT_STARTED) &&
+ (rbl_status != 0xFFFFFFFFu)),
+ 10, 200000);
+ if (err) {
+		aq_pr_err("Boot code hung");
+ goto err_exit;
+ }
+
+ err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
+ rbl_complete,
+ rbl_complete,
+ 10, 2000000);
+
+ if (err) {
+ aq_pr_err("FW Restart timed out");
+ goto err_exit;
+ }
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+
+ if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
+ err = -EIO;
+ aq_pr_err("FW Restart failed");
+ goto err_exit;
+ }
+
+ if (hw_atl2_mif_host_req_int_get(self) &
+ HW_ATL2_MCP_HOST_REQ_INT_READY) {
+ err = -EIO;
+ aq_pr_err("No FW detected. Dynamic FW load not implemented");
+ goto err_exit;
+ }
+
+ if (self->aq_fw_ops) {
+ err = self->aq_fw_ops->init(self);
+ if (err) {
+ aq_pr_err("FW Init failed");
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
new file mode 100644
index 000000000000..b66fa346581c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef HW_ATL2_UTILS_H
+#define HW_ATL2_UTILS_H
+
+#include "aq_hw.h"
+
+/* FW API */
+
+struct link_options_s {
+ u8 link_up:1;
+ u8 link_renegotiate:1;
+ u8 minimal_link_speed:1;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 eee_5G:1;
+ u8 eee_10G:1;
+ u8 rsvd3:3;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd4:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct link_control_s {
+ u8 mode:4;
+ u8 disable_crc_corruption:1;
+ u8 discard_short_frames:1;
+ u8 flow_control_mode:1;
+ u8 disable_length_check:1;
+
+ u8 discard_errored_frames:1;
+ u8 control_frame_enable:1;
+ u8 enable_tx_padding:1;
+ u8 enable_crc_forwarding:1;
+	u8 enable_frame_padding_removal_rx:1;
+	u8 promiscuous_mode:1;
+ u8 rsvd:2;
+
+ u16 rsvd2;
+};
+
+struct thermal_shutdown_s {
+ u8 enable:1;
+ u8 warning_enable:1;
+ u8 rsvd:6;
+
+ u8 shutdown_temperature;
+ u8 cold_temperature;
+ u8 warning_temperature;
+};
+
+struct mac_address_s {
+ u8 mac_address[6];
+};
+
+struct mac_address_aligned_s {
+ struct mac_address_s aligned;
+ u16 rsvd;
+};
+
+struct sleep_proxy_s {
+ struct wake_on_lan_s {
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_link_up:1;
+ u8 wake_on_link_down:1;
+ u8 wake_on_ping:1;
+ u8 wake_on_timer:1;
+ u8 rsvd:2;
+
+ u8 rsvd2;
+ u16 rsvd3;
+
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+ u32 timer;
+ } wake_on_lan;
+
+ struct {
+ u32 mask[4];
+ u32 crc32;
+ } wake_up_pattern[8];
+
+ struct __packed {
+ u8 arp_responder:1;
+ u8 echo_responder:1;
+ u8 igmp_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 ignore_fragmented:1;
+ u8 rsvd:2;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv4_offload;
+
+ u32 ipv4_offload_addr[8];
+ u32 reserved[8];
+
+ struct __packed {
+ u8 ns_responder:1;
+ u8 echo_responder:1;
+ u8 mld_client:1;
+ u8 echo_truncate:1;
+ u8 address_guard:1;
+ u8 rsvd:3;
+
+ u16 echo_max_len;
+ u8 rsvd2;
+ } ipv6_offload;
+
+ u32 ipv6_offload_addr[16][4];
+
+ struct {
+ u16 port[16];
+ } tcp_port_offload;
+
+ struct {
+ u16 port[16];
+ } udp_port_offload;
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka4_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip;
+ u32 remote_ip;
+ } ka4_connection[16];
+
+ struct {
+ u32 retry_count;
+ u32 retry_interval;
+ } ka6_offload;
+
+ struct {
+ u32 timeout;
+ u16 local_port;
+ u16 remote_port;
+ u8 remote_mac_addr[6];
+ u16 rsvd;
+ u32 rsvd2;
+ u32 rsvd3;
+ u16 rsvd4;
+ u16 win_size;
+ u32 seq_num;
+ u32 ack_num;
+ u32 local_ip[4];
+ u32 remote_ip[4];
+ } ka6_connection[16];
+
+ struct {
+ u32 rr_count;
+ u32 rr_buf_len;
+ u32 idx_offset;
+ u32 rr__offset;
+ } mdns_offload;
+};
+
+struct pause_quanta_s {
+ u16 quanta_10M;
+ u16 threshold_10M;
+ u16 quanta_100M;
+ u16 threshold_100M;
+ u16 quanta_1G;
+ u16 threshold_1G;
+ u16 quanta_2P5G;
+ u16 threshold_2P5G;
+ u16 quanta_5G;
+ u16 threshold_5G;
+ u16 quanta_10G;
+ u16 threshold_10G;
+};
+
+struct data_buffer_status_s {
+ u32 data_offset;
+ u32 data_length;
+};
+
+struct device_caps_s {
+ u8 finite_flashless:1;
+ u8 cable_diag:1;
+ u8 ncsi:1;
+ u8 avb:1;
+ u8 rsvd:4;
+
+ u8 rsvd2;
+ u16 rsvd3;
+ u32 rsvd4;
+};
+
+struct version_s {
+ struct bundle_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } bundle;
+ struct mac_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } mac;
+ struct phy_version_t {
+ u8 major;
+ u8 minor;
+ u16 build;
+ } phy;
+ u32 rsvd;
+};
+
+struct link_status_s {
+ u8 link_state:4;
+ u8 link_rate:4;
+
+ u8 pause_tx:1;
+ u8 pause_rx:1;
+ u8 eee:1;
+ u8 duplex:1;
+ u8 rsvd:4;
+
+ u16 rsvd2;
+};
+
+struct wol_status_s {
+ u8 wake_count;
+ u8 wake_reason;
+
+	u16 wake_up_packet_length:12;
+	u16 wake_up_pattern_number:3;
+ u16 rsvd:1;
+
+ u32 wake_up_packet[379];
+};
+
+struct mac_health_monitor_s {
+ u8 mac_ready:1;
+ u8 mac_fault:1;
+ u8 mac_flashless_finished:1;
+ u8 rsvd:5;
+
+ u8 mac_temperature;
+ u16 mac_heart_beat;
+ u16 mac_fault_code;
+ u16 rsvd2;
+};
+
+struct phy_health_monitor_s {
+ u8 phy_ready:1;
+ u8 phy_fault:1;
+ u8 phy_hot_warning:1;
+ u8 rsvd:5;
+
+ u8 phy_temperature;
+ u16 phy_heart_beat;
+ u16 phy_fault_code;
+ u16 rsvd2;
+};
+
+struct device_link_caps_s {
+ u8 rsvd:3;
+ u8 internal_loopback:1;
+ u8 external_loopback:1;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd3:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd4:1;
+ u8 eee_5G:1;
+ u8 rsvd5:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 pfc:1;
+ u8 downshift:1;
+ u8 downshift_retry:4;
+};
+
+struct sleep_proxy_caps_s {
+ u8 ipv4_offload:1;
+ u8 ipv6_offload:1;
+ u8 tcp_port_offload:1;
+ u8 udp_port_offload:1;
+ u8 ka4_offload:1;
+ u8 ka6_offload:1;
+ u8 mdns_offload:1;
+ u8 wake_on_ping:1;
+
+ u8 wake_on_magic_packet:1;
+ u8 wake_on_pattern:1;
+ u8 wake_on_timer:1;
+ u8 wake_on_link:1;
+ u8 wake_patterns_count:4;
+
+ u8 ipv4_count;
+ u8 ipv6_count;
+
+ u8 tcp_port_offload_count;
+ u8 udp_port_offload_count;
+
+ u8 tcp4_ka_count;
+ u8 tcp6_ka_count;
+
+ u8 igmp_offload:1;
+ u8 mld_offload:1;
+ u8 rsvd:6;
+
+ u8 rsvd2;
+ u16 rsvd3;
+};
+
+struct lkp_link_caps_s {
+ u8 rsvd:5;
+ u8 rate_10M_hd:1;
+ u8 rate_100M_hd:1;
+ u8 rate_1G_hd:1;
+
+ u8 rate_10M:1;
+ u8 rate_100M:1;
+ u8 rate_1G:1;
+ u8 rate_2P5G:1;
+ u8 rate_N2P5G:1;
+ u8 rate_5G:1;
+ u8 rate_N5G:1;
+ u8 rate_10G:1;
+
+ u8 rsvd2:1;
+ u8 eee_100M:1;
+ u8 eee_1G:1;
+ u8 eee_2P5G:1;
+ u8 rsvd3:1;
+ u8 eee_5G:1;
+ u8 rsvd4:1;
+ u8 eee_10G:1;
+
+ u8 pause_rx:1;
+ u8 pause_tx:1;
+ u8 rsvd5:6;
+};
+
+struct core_dump_s {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+
+ u32 hi;
+ u32 lo;
+
+ u32 regs[32];
+};
+
+struct trace_s {
+ u32 sync_counter;
+ u32 mem_buffer[0x1ff];
+};
+
+struct cable_diag_control_s {
+	u8 toggle:1;
+ u8 rsvd:7;
+
+ u8 wait_timeout_sec;
+ u16 rsvd2;
+};
+
+struct cable_diag_lane_data_s {
+ u8 result_code;
+ u8 dist;
+ u8 far_dist;
+ u8 rsvd;
+};
+
+struct cable_diag_status_s {
+ struct cable_diag_lane_data_s lane_data[4];
+ u8 transact_id;
+ u8 status:4;
+ u8 rsvd:4;
+ u16 rsvd2;
+};
+
+struct statistics_s {
+ struct {
+ u32 link_up;
+ u32 link_down;
+ } link;
+
+ struct {
+ u64 tx_unicast_octets;
+ u64 tx_multicast_octets;
+ u64 tx_broadcast_octets;
+ u64 rx_unicast_octets;
+ u64 rx_multicast_octets;
+ u64 rx_broadcast_octets;
+
+ u32 tx_unicast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_errors;
+
+ u32 rx_unicast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_dropped_frames;
+ u32 rx_error_frames;
+
+ u32 tx_good_frames;
+ u32 rx_good_frames;
+ u32 reserve_fw_gap;
+ } msm;
+ u32 main_loop_cycles;
+ u32 reserve_fw_gap;
+};
+
+struct filter_caps_s {
+ u8 l2_filters_base_index:6;
+ u8 flexible_filter_mask:2;
+ u8 l2_filter_count;
+ u8 ethertype_filter_base_index;
+ u8 ethertype_filter_count;
+
+ u8 vlan_filter_base_index;
+ u8 vlan_filter_count;
+ u8 l3_ip4_filter_base_index:4;
+ u8 l3_ip4_filter_count:4;
+ u8 l3_ip6_filter_base_index:4;
+ u8 l3_ip6_filter_count:4;
+
+ u8 l4_filter_base_index:4;
+ u8 l4_filter_count:4;
+ u8 l4_flex_filter_base_index:4;
+ u8 l4_flex_filter_count:4;
+ u8 rslv_tbl_base_index;
+ u8 rslv_tbl_count;
+};
+
+struct request_policy_s {
+ struct {
+ u8 all:1;
+ u8 mcast:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } promisc;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } bcast;
+
+ struct {
+ u8 accept:1;
+ u8 rsvd:1;
+ u8 rx_queue_tc_index:5;
+ u8 queue_or_tc:1;
+ } mcast;
+
+ u8 rsvd:8;
+};
+
+struct fw_interface_in {
+ u32 mtu;
+ u32 rsvd1;
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ u32 rsvd2;
+ struct link_options_s link_options;
+ u32 rsvd3;
+ struct thermal_shutdown_s thermal_shutdown;
+ u32 rsvd4;
+ struct sleep_proxy_s sleep_proxy;
+ u32 rsvd5;
+ struct pause_quanta_s pause_quanta[8];
+ struct cable_diag_control_s cable_diag_control;
+ u32 rsvd6;
+ struct data_buffer_status_s data_buffer_status;
+ u32 rsvd7;
+ struct request_policy_s request_policy;
+};
+
+struct transaction_counter_s {
+ u16 transaction_cnt_a;
+ u16 transaction_cnt_b;
+};
+
+struct management_status_s {
+ struct mac_address_s mac_address;
+ u16 vlan;
+
+	struct {
+ u32 enable : 1;
+ u32 rsvd:31;
+ } flags;
+
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+struct fw_interface_out {
+ struct transaction_counter_s transaction_id;
+ struct version_s version;
+ struct link_status_s link_status;
+ struct wol_status_s wol_status;
+ u32 rsvd;
+ u32 rsvd2;
+ struct mac_health_monitor_s mac_health_monitor;
+ u32 rsvd3;
+ u32 rsvd4;
+ struct phy_health_monitor_s phy_health_monitor;
+ u32 rsvd5;
+ u32 rsvd6;
+ struct cable_diag_status_s cable_diag_status;
+ u32 rsvd7;
+ struct device_link_caps_s device_link_caps;
+ u32 rsvd8;
+ struct sleep_proxy_caps_s sleep_proxy_caps;
+ u32 rsvd9;
+ struct lkp_link_caps_s lkp_link_caps;
+ u32 rsvd10;
+ struct core_dump_s core_dump;
+ u32 rsvd11;
+ struct statistics_s stats;
+ u32 rsvd12;
+ struct filter_caps_s filter_caps;
+ struct device_caps_s device_caps;
+ u32 rsvd13;
+ struct management_status_s management_status;
+ u32 reserve[21];
+ struct trace_s trace;
+};
+
+#define AQ_A2_FW_LINK_RATE_INVALID 0
+#define AQ_A2_FW_LINK_RATE_10M 1
+#define AQ_A2_FW_LINK_RATE_100M 2
+#define AQ_A2_FW_LINK_RATE_1G 3
+#define AQ_A2_FW_LINK_RATE_2G5 4
+#define AQ_A2_FW_LINK_RATE_5G 5
+#define AQ_A2_FW_LINK_RATE_10G 6
+
+#define AQ_HOST_MODE_INVALID 0U
+#define AQ_HOST_MODE_ACTIVE 1U
+#define AQ_HOST_MODE_SLEEP_PROXY 2U
+#define AQ_HOST_MODE_LOW_POWER 3U
+#define AQ_HOST_MODE_SHUTDOWN 4U
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self);
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count);
+
+extern const struct aq_fw_ops aq_a2_fw_ops;
+
+#endif /* HW_ATL2_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
new file mode 100644
index 000000000000..0ffc33bd67d0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw.h"
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_internal.h"
+
+#define AQ_A2_FW_READ_TRY_MAX 1000
+
+#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_write(HW,\
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32))
+
+#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
+ hw_atl2_mif_shared_buf_get(HW, \
+ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), \
+ sizeof(VARIABLE) / sizeof(u32))
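+
+/* The macros above translate a fw_interface_in member into a dword
+ * offset within the MIF shared-buffer window, so callers can access
+ * whole structures by name instead of by raw register address.
+ */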
+
+/* This should never be used on non-atomic fields; treat any read
+ * wider than u32 as non-atomic.
+ */
+#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
+{\
+ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+ sizeof(u32)) != 0,\
+ "Non aligned read " # ITEM);\
+ BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
+ "Non atomic read " # ITEM);\
+ hw_atl2_mif_shared_buf_read(HW, \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
+
+#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
+ hw_atl2_shared_buffer_read_block((HW), \
+ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
+ sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
+ (DATA))
+
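+/* The firmware appears to bump transaction_cnt_a before and
+ * transaction_cnt_b after updating the shared buffer, so a consistent
+ * snapshot requires both counters to match before and after the block
+ * read, with the same value observed on each side.
+ */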
+static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
+ u32 offset, u32 dwords, void *data)
+{
+ struct transaction_counter_s tid1, tid2;
+ int cnt = 0;
+
+ do {
+ do {
+ hw_atl2_shared_buffer_read(self, transaction_id, tid1);
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
+ udelay(1);
+ } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
+
+ hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
+
+ hw_atl2_shared_buffer_read(self, transaction_id, tid2);
+
+ cnt++;
+ if (cnt > AQ_A2_FW_READ_TRY_MAX)
+ return -ETIME;
+ } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
+ tid1.transaction_cnt_a != tid2.transaction_cnt_a);
+
+ return 0;
+}
+
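+/* Signal the firmware that the host has finished writing the shared
+ * buffer, then wait for the firmware to clear the flag as an
+ * acknowledgment that the request was consumed.
+ */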
+static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
+{
+ u32 val;
+ int err;
+
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 100000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_init(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+ u32 mtu;
+ u32 val;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_ACTIVE;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ hw_atl2_shared_buffer_get(self, mtu, mtu);
+ mtu = HW_ATL2_MTU_JUMBO;
+ hw_atl2_shared_buffer_write(self, mtu, mtu);
+
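+	/* Same handshake as hw_atl2_shared_buffer_finish_ack(), but with
+	 * a longer timeout to give the firmware time to start up.
+	 */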
+ hw_atl2_mif_host_finished_write_set(self, 1U);
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
+ self, val, val == 0U,
+ 100, 5000000U);
+ WARN(err, "hw_atl2_shared_buffer_finish_ack");
+
+ return err;
+}
+
+static int aq_a2_fw_deinit(struct aq_hw_s *self)
+{
+ struct link_control_s link_control;
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SHUTDOWN;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static void a2_link_speed_mask2fw(u32 speed,
+ struct link_options_s *link_options)
+{
+ link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
+ link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
+ link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
+ link_options->rate_N2P5G = link_options->rate_2P5G;
+ link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
+ link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
+ link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
+}
+
+static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_up = 1U;
+ a2_link_speed_mask2fw(speed, &link_options);
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+ switch (state) {
+ case MPI_INIT:
+ link_options.link_up = 1U;
+ break;
+ case MPI_DEINIT:
+ link_options.link_up = 0U;
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
+{
+ struct link_status_s link_status;
+
+ hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+ switch (link_status.link_rate) {
+ case AQ_A2_FW_LINK_RATE_10G:
+ self->aq_link_status.mbps = 10000;
+ break;
+ case AQ_A2_FW_LINK_RATE_5G:
+ self->aq_link_status.mbps = 5000;
+ break;
+ case AQ_A2_FW_LINK_RATE_2G5:
+ self->aq_link_status.mbps = 2500;
+ break;
+ case AQ_A2_FW_LINK_RATE_1G:
+ self->aq_link_status.mbps = 1000;
+ break;
+ case AQ_A2_FW_LINK_RATE_100M:
+ self->aq_link_status.mbps = 100;
+ break;
+ case AQ_A2_FW_LINK_RATE_10M:
+ self->aq_link_status.mbps = 10;
+ break;
+ default:
+ self->aq_link_status.mbps = 0;
+ }
+
+ return 0;
+}
+
+static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ struct mac_address_aligned_s mac_address;
+
+ hw_atl2_shared_buffer_get(self, mac_address, mac_address);
+ ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);
+
+ return 0;
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct statistics_s stats;
+
+ hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+
+#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
+ stats.msm._F_ - priv->last_stats.msm._F_)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_error_frames);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+
+ AQ_SDELTA(ubrc, rx_unicast_octets);
+ AQ_SDELTA(ubtc, tx_unicast_octets);
+ AQ_SDELTA(mbrc, rx_multicast_octets);
+ AQ_SDELTA(mbtc, tx_multicast_octets);
+ AQ_SDELTA(bbrc, rx_broadcast_octets);
+ AQ_SDELTA(bbtc, tx_broadcast_octets);
+ }
+#undef AQ_SDELTA
+ self->curr_stats.dma_pkt_rc =
+ hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_pkt_tc =
+ hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ self->curr_stats.dma_oct_rc =
+ hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ self->curr_stats.dma_oct_tc =
+ hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+ memcpy(&priv->last_stats, &stats, sizeof(stats));
+
+ return 0;
+}
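
AQ_SDELTA folds the difference between the fresh firmware snapshot and the copy cached in priv->last_stats into 64-bit running totals; because the subtraction is unsigned, a hardware counter that wraps around still yields the correct small delta. A standalone illustration, assuming 32-bit firmware counters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last = 0xFFFFFFF0u;	/* counter just before wrapping */
	uint32_t now  = 0x00000010u;	/* counter after wrapping */
	uint64_t total = 0;

	total += (uint32_t)(now - last);	/* 0x20 == 32, not ~4 billion */
	printf("delta=%llu\n", (unsigned long long)total);
	return 0;
}
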
+
+static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
+{
+ struct link_options_s link_options;
+ int err;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.link_renegotiate = 1U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ err = hw_atl2_shared_buffer_finish_ack(self);
+
+ /* Put the renegotiate status back to zero
+ * after the command completes.
+ */
+ link_options.link_renegotiate = 0U;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return err;
+}
+
+u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
+{
+ struct version_s version;
+
+ hw_atl2_shared_buffer_read_safe(self, version, &version);
+
+ /* A2 FW version is stored in reverse order */
+ return version.mac.major << 24 |
+ version.mac.minor << 16 |
+ version.mac.build;
+}
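
The returned u32 packs major/minor/build with the major number in the top byte, so two versions compare correctly with a plain integer comparison. A small round-trip helper, with field widths assumed from the shifts above:

#include <stdint.h>

static inline uint32_t ver_pack(uint8_t major, uint8_t minor, uint16_t build)
{
	return (uint32_t)major << 24 | (uint32_t)minor << 16 | build;
}

static inline uint8_t  ver_major(uint32_t v) { return v >> 24; }
static inline uint8_t  ver_minor(uint32_t v) { return (v >> 16) & 0xFF; }
static inline uint16_t ver_build(uint32_t v) { return v & 0xFFFF; }
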
+
+int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
+ u8 *base_index, u8 *count)
+{
+ struct filter_caps_s filter_caps;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
+ if (err)
+ return err;
+
+ *base_index = filter_caps.rslv_tbl_base_index;
+ *count = filter_caps.rslv_tbl_count;
+ return 0;
+}
+
+const struct aq_fw_ops aq_a2_fw_ops = {
+ .init = aq_a2_fw_init,
+ .deinit = aq_a2_fw_deinit,
+ .reset = NULL,
+ .renegotiate = aq_a2_fw_renegotiate,
+ .get_mac_permanent = aq_a2_fw_get_mac_permanent,
+ .set_link_speed = aq_a2_fw_set_link_speed,
+ .set_state = aq_a2_fw_set_state,
+ .update_link_status = aq_a2_fw_update_link_status,
+ .update_stats = aq_a2_fw_update_stats,
+};
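
aq_a2_fw_ops is the usual const table of function pointers; an operation the A2 firmware path does not implement (reset here) stays NULL, and callers are expected to test the pointer before dispatching. A minimal sketch of the pattern with hypothetical types:

#include <stddef.h>

struct fw_ops_example {
	int (*init)(void *hw);
	int (*reset)(void *hw);		/* optional: may be NULL */
};

static int call_reset(const struct fw_ops_example *ops, void *hw)
{
	if (!ops->reset)
		return -1;		/* -EOPNOTSUPP in kernel terms */
	return ops->reset(hw);
}
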
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index fbe9d88b13c7..36c7cf05630a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -846,8 +846,7 @@ static int get_ingress_sakey_record(struct aq_hw_s *hw,
rec->key[7] = packed_record[14];
rec->key[7] |= packed_record[15] << 16;
- rec->key_len = (rec->key_len & 0xFFFFFFFC) |
- (packed_record[16] & 0x3);
+ rec->key_len = packed_record[16] & 0x3;
return 0;
}
@@ -1158,6 +1157,7 @@ static int set_egress_ctlf_record(struct aq_hw_s *hw,
packed_record[0] = rec->sa_da[0] & 0xFFFF;
packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
packed_record[2] = rec->sa_da[1] & 0xFFFF;
packed_record[3] = rec->eth_type & 0xFFFF;
@@ -1552,7 +1552,7 @@ static int set_egress_sc_record(struct aq_hw_s *hw,
packed_record[5] |= (rec->sak_len & 0x3) << 4;
- packed_record[7] |= (rec->valid & 0x1) << 15;
+ packed_record[7] = (rec->valid & 0x1) << 15;
return set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSCRECORD + table_index);
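
The second hunk turns a read-modify-write into a plain store. OR-ing a flag into a word can only set bits, never clear them, so if packed_record[7] ever carries leftover state (or the buffer's zero-fill is removed), "|=" would leave a previously-set valid bit stuck. A minimal illustration of the difference:

#include <stdint.h>

static void set_valid(uint16_t *word, int valid)
{
	*word = (uint16_t)((valid & 0x1) << 15);	/* clears bit 15 when valid == 0 */
}

static void set_valid_sticky(uint16_t *word, int valid)
{
	*word |= (uint16_t)((valid & 0x1) << 15);	/* can never clear bit 15 */
}
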
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02b7705393ca..112edbd30823 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -871,13 +871,40 @@ static void ag71xx_mac_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_MII) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_GMII:
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RMII:
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ break;
+ goto unsupported;
+ case PHY_INTERFACE_MODE_RGMII:
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ break;
+ goto unsupported;
+ default:
+ goto unsupported;
}
phylink_set(mask, MII);
@@ -889,6 +916,8 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_RGMII ||
state->interface == PHY_INTERFACE_MODE_GMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
@@ -898,6 +927,10 @@ static void ag71xx_mac_validate(struct phylink_config *config,
__ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 00bd7bd55794..decab9a8e4a8 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1186,7 +1186,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
u32 mac, txq, rxq;
- hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+ hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
hw->mac_speed = adapter->link_speed == SPEED_1000 ?
atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
@@ -2449,12 +2449,6 @@ static int atl1c_resume(struct device *dev)
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
-#if 0
- AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
- pm_data &= ~PM_CTRLSTAT_PME_EN;
- AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
-#endif
-
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 271e7034fa70..b35fcfcd692d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1042,7 +1042,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional 40 bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct tx_packet_desc) * tpd_ring->count
+ sizeof(struct rx_free_desc) * rfd_ring->count
+ sizeof(struct rx_return_desc) * rrd_ring->count
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2c6ba046d2a8..17ae6df90723 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1145,7 +1145,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
break;
}
}
- if (false == pg_found) {
+ if (!pg_found) {
data[help_data->num_of_pg].pg = add_pg;
data[help_data->num_of_pg].pg_priority =
(1 << ttp[add_traf_type]);
@@ -1155,7 +1155,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
}
DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
- add_traf_type, (false == pg_found) ? "NO" : "YES",
+ add_traf_type, !pg_found ? "NO" : "YES",
help_data->num_of_pg);
}
}
@@ -1544,8 +1544,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
entry = 0;
- if (i == (num_of_pri-1) &&
- false == b_found_strict)
+ if (i == (num_of_pri-1) && !b_found_strict)
/* last entry will be handled separately
* If no priority is strict than last
* entry goes to last queue.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 517caedc0a87..1426c691c7c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3085,6 +3085,7 @@ static int bnx2x_bsc_read(struct link_params *params,
u8 xfer_cnt,
u32 *data_array)
{
+ u64 t0, delta;
u32 val, i;
int rc = 0;
@@ -3114,17 +3115,18 @@ static int bnx2x_bsc_read(struct link_params *params,
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
- i = 0;
+ t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
- udelay(10);
- val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
- if (i++ > 1000) {
- DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
- i);
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %Lu ns\n",
+ delta);
rc = -EFAULT;
break;
}
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
@@ -3138,16 +3140,18 @@ static int bnx2x_bsc_read(struct link_params *params,
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
/* Poll for completion */
- i = 0;
+ t0 = ktime_get_ns();
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
- udelay(10);
- val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
- if (i++ > 1000) {
- DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+ delta = ktime_get_ns() - t0;
+ if (delta > 10 * NSEC_PER_MSEC) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %Lu ns\n",
+ delta);
rc = -EFAULT;
break;
}
+ usleep_range(10, 20);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
}
if (rc == -EFAULT)
return rc;
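
Both polling loops above trade "give up after 1000 iterations of udelay(10)" for an explicit wall-clock budget: an iteration count stretches with whatever each delay really took, while comparing ktime_get_ns() against a 10 ms deadline bounds the wait regardless of scheduling. The same pattern in portable userspace C, with the polled condition as a stand-in:

#include <errno.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int poll_ready(int (*ready)(void *), void *arg, uint64_t timeout_ns)
{
	uint64_t t0 = now_ns();

	while (!ready(arg)) {
		if (now_ns() - t0 > timeout_ns)
			return -ETIMEDOUT;
		usleep(10);		/* plays the role of usleep_range(10, 20) */
	}
	return 0;
}
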
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5097a44686b3..b4476f44e386 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -331,27 +331,6 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *obj,
- atomic_t *counter)
-{
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
-
- list_for_each(pos, &obj->head)
- cnt++;
-
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
-
- atomic_set(counter, cnt);
-}
-
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, int type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 58e0d9a781e9..c62589c266b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1766,7 +1766,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
netdev_warn(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1849,7 +1849,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
}
}
@@ -5039,8 +5039,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -5350,9 +5349,9 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + 0x10000;
+ db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else
- db->doorbell = bp->bar1 + 0x4000;
+ db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -6359,6 +6358,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -6407,6 +6407,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (!bp->max_mtu)
bp->max_mtu = BNXT_MAX_MTU;
+ if (bp->db_size)
+ goto func_qcfg_exit;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ }
+ bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
+ bp->db_size <= min_db_offset)
+ bp->db_size = pci_resource_len(bp->pdev, 2);
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
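
The doorbell size arrives from firmware in KB, is page-aligned, and is then sanity-checked: zero, larger than the BAR, or too small to cover the minimum doorbell offset all fall back to the full BAR length. The same clamping as a standalone function (page size and names illustrative):

#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096u

static uint32_t page_align(uint32_t v)
{
	return (v + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1);
}

static uint32_t pick_db_size(uint32_t fw_kb, uint32_t bar_len, uint32_t min_off)
{
	uint32_t db = page_align(fw_kb * 1024u);

	if (!db || db > bar_len || db <= min_off)
		db = bar_len;		/* fall back to mapping the whole BAR */
	return db;
}
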
@@ -6428,23 +6443,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (!rc) {
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ int i, tqm_rings;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
rc = -ENOMEM;
goto ctx_err;
}
- ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- bp->ctx = ctx;
ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
@@ -6477,6 +6482,20 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = bp->max_q;
+
+ tqm_rings = ctx->tqm_fp_rings_count + 1;
+ ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < tqm_rings; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+ bp->ctx = ctx;
} else {
rc = 0;
}
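
Two things change in this allocation: it now happens after tqm_fp_rings_count is known, so the array is sized by the firmware-reported ring count rather than bp->max_q, and kzalloc(sizeof(*ctx_pg) * n) becomes kcalloc(n, sizeof(*ctx_pg)), which rejects multiplications that overflow. The userspace equivalent of that guard:

#include <stdint.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)	/* the check kcalloc performs */
		return NULL;
	return calloc(n, size);			/* calloc also zeroes, like kcalloc */
}
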
@@ -6729,7 +6748,7 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
return;
if (ctx->tqm_mem[0]) {
- for (i = 0; i < bp->max_q + 1; i++)
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
@@ -6750,6 +6769,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 entries_sp, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
@@ -6839,14 +6859,17 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- entries = ctx->qp_max_l2_entries + extra_qps;
+ min = ctx->tqm_min_entries_per_ring;
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+ entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < bp->max_q + 1; i++) {
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
+ ctx_pg->entries = i ? entries : entries_sp;
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
if (rc)
return rc;
@@ -10259,7 +10282,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->missed_irqs++;
+ cpr->sw_stats.cmn.missed_irqs++;
}
}
}
@@ -10888,6 +10911,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->dev = dev;
bp->pdev = pdev;
+ /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
+ * determines the BAR size.
+ */
bp->bar0 = pci_ioremap_bar(pdev, 0);
if (!bp->bar0) {
dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
@@ -10895,13 +10921,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- bp->bar1 = pci_ioremap_bar(pdev, 2);
- if (!bp->bar1) {
- dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
- rc = -ENOMEM;
- goto init_err_release;
- }
-
bp->bar2 = pci_ioremap_bar(pdev, 4);
if (!bp->bar2) {
dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
@@ -11823,6 +11842,16 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
return 0;
}
+static int bnxt_map_db_bar(struct bnxt *bp)
+{
+ if (!bp->db_size)
+ return -ENODEV;
+ bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
+ if (!bp->bar1)
+ return -ENOMEM;
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -11883,6 +11912,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_map_db_bar(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto init_err_pci_clean;
+ }
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3d39638521d6..9e173d74b72a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -537,6 +537,9 @@ struct nqe_cn {
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -902,6 +905,20 @@ struct bnxt_rx_ring_info {
struct page_pool *page_pool;
};
+struct bnxt_rx_sw_stats {
+ u64 rx_l4_csum_errors;
+ u64 rx_buf_errors;
+};
+
+struct bnxt_cmn_sw_stats {
+ u64 missed_irqs;
+};
+
+struct bnxt_sw_stats {
+ struct bnxt_rx_sw_stats rx;
+ struct bnxt_cmn_sw_stats cmn;
+};
+
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -929,9 +946,8 @@ struct bnxt_cp_ring_info {
struct ctx_hw_stats *hw_stats;
dma_addr_t hw_stats_map;
u32 hw_stats_ctx_id;
- u64 rx_l4_csum_errors;
- u64 rx_buf_errors;
- u64 missed_irqs;
+
+ struct bnxt_sw_stats sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1351,6 +1367,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
+ u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
@@ -1810,6 +1827,7 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
@@ -1852,7 +1870,6 @@ struct bnxt {
u8 dsn[8];
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
- struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 360f9a95c1d5..6b88143af5ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,7 @@ reset_coalesce:
return rc;
}
-static const char * const bnxt_ring_stats_str[] = {
+static const char * const bnxt_ring_rx_stats_str[] = {
"rx_ucast_packets",
"rx_mcast_packets",
"rx_bcast_packets",
@@ -146,6 +146,9 @@ static const char * const bnxt_ring_stats_str[] = {
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
@@ -171,9 +174,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_errors",
};
-static const char * const bnxt_ring_sw_stats_str[] = {
+static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
"rx_buf_errors",
+};
+
+static const char * const bnxt_cmn_sw_stats_str[] = {
"missed_irqs",
};
@@ -303,6 +309,11 @@ static struct {
{0, "tx_total_discard_pkts"},
};
+#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
+#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
+#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
+#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -482,12 +493,21 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
- int num_stats;
+ int rx, tx, cmn;
+ bool sh = false;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
- num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
- ARRAY_SIZE(bnxt_ring_sw_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
- return num_stats * bp->cp_nr_rings;
+ rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
+ bnxt_get_num_tpa_ring_stats(bp);
+ tx = NUM_RING_TX_HW_STATS;
+ cmn = NUM_RING_CMN_SW_STATS;
+ if (sh)
+ return (rx + tx + cmn) * bp->cp_nr_rings;
+ else
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -528,13 +548,29 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
}
}
+static bool is_rx_ring(struct bnxt *bp, int ring_num)
+{
+ return ring_num < bp->rx_nr_rings;
+}
+
+static bool is_tx_ring(struct bnxt *bp, int ring_num)
+{
+ int tx_base = 0;
+
+ if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+ tx_base = bp->rx_nr_rings;
+
+ if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
+ return true;
+ return false;
+}
+
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
- bnxt_get_num_tpa_ring_stats(bp);
+ u32 tpa_stats;
if (!bp->bnapi) {
j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
@@ -544,17 +580,42 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
+ tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ u64 *sw;
int k;
- for (k = 0; k < stat_fields; j++, k++)
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (is_tx_ring(bp, i)) {
+ k = NUM_RING_RX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ }
+ if (!tpa_stats || !is_rx_ring(bp, i))
+ goto skip_tpa_ring_stats;
+
+ k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
+ for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
+ tpa_stats; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
- buf[j++] = cpr->rx_l4_csum_errors;
- buf[j++] = cpr->rx_buf_errors;
- buf[j++] = cpr->missed_irqs;
+
+skip_tpa_ring_stats:
+ sw = (u64 *)&cpr->sw_stats.rx;
+ if (is_rx_ring(bp, i)) {
+ for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
+ buf[j] = sw[k];
+ }
+
+ sw = (u64 *)&cpr->sw_stats.cmn;
+ for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
+ buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -632,31 +693,48 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- num_str = ARRAY_SIZE(bnxt_ring_stats_str);
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_rx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
- if (!BNXT_SUPPORTS_TPA(bp))
+ if (is_tx_ring(bp, i)) {
+ num_str = NUM_RING_TX_HW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_tx_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = bnxt_get_num_tpa_ring_stats(bp);
+ if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
- if (bp->max_tpa_v2) {
- num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ if (bp->max_tpa_v2)
str = bnxt_ring_tpa2_stats_str;
- } else {
- num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ else
str = bnxt_ring_tpa_stats_str;
- }
+
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i, str[j]);
buf += ETH_GSTRING_LEN;
}
skip_tpa_stats:
- num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ if (is_rx_ring(bp, i)) {
+ num_str = NUM_RING_RX_SW_STATS;
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_rx_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+ num_str = NUM_RING_CMN_SW_STATS;
for (j = 0; j < num_str; j++) {
sprintf(buf, "[%d]: %s", i,
- bnxt_ring_sw_stats_str[j]);
+ bnxt_cmn_sw_stats_str[j]);
buf += ETH_GSTRING_LEN;
}
}
@@ -1749,8 +1827,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
return rc;
}
-static int bnxt_firmware_reset(struct net_device *dev,
- u16 dir_type)
+static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -1758,48 +1836,77 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.embedded_proc_type = proc_type;
+ req.selfrst_status = self_reset;
+ req.flags = flags;
+
+ if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ } else {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
+ }
+ return rc;
+}
+
+static int bnxt_firmware_reset(struct net_device *dev,
+ enum bnxt_nvm_directory_type dir_type)
+{
+ u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
+ u8 proc_type, flags = 0;
+
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
case BNX_DIR_TYPE_CHIMP_PATCH:
case BNX_DIR_TYPE_BOOTCODE:
case BNX_DIR_TYPE_BOOTCODE_2:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
/* Self-reset ChiMP upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
/* Self-reset APE upon next PCIe reset: */
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
+ self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
- req.embedded_proc_type =
- FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
break;
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
- break;
- case BNXT_FW_RESET_CHIP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
- req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
- if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
- req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
- break;
- case BNXT_FW_RESET_AP:
- req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
default:
return -EINVAL;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == -EACCES)
- bnxt_print_admin_err(bp);
- return rc;
+ return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
+}
+
+static int bnxt_firmware_reset_chip(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 flags = 0;
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+
+ return bnxt_hwrm_firmware_reset(dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ flags);
+}
+
+static int bnxt_firmware_reset_ap(struct net_device *dev)
+{
+ return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
+ 0);
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1988,9 +2095,9 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc, filename);
return rc;
}
- if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
- else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ else if (bnxt_dir_type_is_other_exec_format(dir_type))
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
@@ -2378,7 +2485,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
}
/* Create or re-write an NVM item: */
- if (bnxt_dir_type_is_executable(type) == true)
+ if (bnxt_dir_type_is_executable(type))
return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
@@ -2976,7 +3083,11 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ bool reload = false;
+ u32 req = *flags;
+
+ if (!req)
+ return -EINVAL;
if (!BNXT_PF(bp)) {
netdev_err(dev, "Reset is not supported from a VF\n");
@@ -2990,33 +3101,37 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EBUSY;
}
- if (*flags == ETH_RESET_ALL) {
+ if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
/* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
-
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc) {
- netdev_info(dev, "Reset request successful.\n");
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
- netdev_info(dev, "Reload driver to complete reset\n");
- *flags = 0;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_chip(dev)) {
+ netdev_info(dev, "Firmware reset request successful.\n");
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_CHIP;
+ }
+ } else if (req == BNXT_FW_RESET_CHIP) {
+ return -EOPNOTSUPP; /* the only reset requested, fail hard */
}
- } else if (*flags == ETH_RESET_AP) {
- /* This feature is not supported in older firmware versions */
- if (bp->hwrm_spec_code < 0x10803)
- return -EOPNOTSUPP;
+ }
- rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc) {
- netdev_info(dev, "Reset Application Processor request successful.\n");
- *flags = 0;
+ if (req & BNXT_FW_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code >= 0x10803) {
+ if (!bnxt_firmware_reset_ap(dev)) {
+ netdev_info(dev, "Reset application processor successful.\n");
+ reload = true;
+ *flags &= ~BNXT_FW_RESET_AP;
+ }
+ } else if (req == BNXT_FW_RESET_AP) {
+ return -EOPNOTSUPP; /* the only reset requested, fail hard */
}
- } else {
- rc = -EINVAL;
}
- return rc;
+ if (reload)
+ netdev_info(dev, "Reload driver to complete reset\n");
+
+ return 0;
}
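
The rewritten bnxt_reset treats *flags as a proper ethtool bitmask instead of two magic values: each supported reset group is attempted when all of its bits are present, the bits are cleared from *flags on success so the core can report what was left unhandled, and -EOPNOTSUPP is returned only when an unsupported reset was the sole thing requested. A sketch of that contract with hypothetical flag values:

#include <stdint.h>

#define RST_CHIP 0x1u		/* stand-ins for the ETH_RESET_* groups */
#define RST_AP   0x2u

static int do_chip_reset(void) { return 0; }	/* pretend success */

static int handle_reset(uint32_t *flags)
{
	uint32_t req = *flags;

	if (!req)
		return -1;			/* -EINVAL upstream */

	if ((req & RST_CHIP) == RST_CHIP && do_chip_reset() == 0)
		*flags &= ~RST_CHIP;		/* caller sees what remains */

	return 0;
}
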
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3576d951727b..ce7585ff9e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -77,8 +77,12 @@ struct hwrm_dbg_cmn_output {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
-#define BNXT_FW_RESET_AP 0xfffe
-#define BNXT_FW_RESET_CHIP 0xffff
+#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT)
+#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \
+ ETH_RESET_DMA | ETH_RESET_FILTER | \
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | \
+ ETH_RESET_PHY | ETH_RESET_RAM) \
+ << ETH_RESET_SHARED_SHIFT)
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7cf27dffadb5..7e9235c8d21e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2019 Broadcom Inc.
+ * Copyright (c) 2018-2020 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -207,6 +207,8 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_PORT_ECN_QSTATS 0xbaUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -220,6 +222,8 @@ struct cmd_nums {
#define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
#define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
#define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
#define HWRM_EXEC_FWD_RESP 0xd0UL
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
@@ -233,6 +237,7 @@ struct cmd_nums {
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -331,6 +336,7 @@ struct cmd_nums {
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -341,6 +347,31 @@ struct cmd_nums {
#define HWRM_MFG_OTP_CFG 0x207UL
#define HWRM_MFG_OTP_QCFG 0x208UL
#define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_ATTACH 0x2c7UL
+ #define HWRM_TF_SESSION_CLOSE 0x2c8UL
+ #define HWRM_TF_SESSION_QCFG 0x2c9UL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL
+ #define HWRM_TF_TBL_TYPE_GET 0x2d0UL
+ #define HWRM_TF_TBL_TYPE_SET 0x2d1UL
+ #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL
+ #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL
+ #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL
+ #define HWRM_TF_EXT_EM_OP 0x2ddUL
+ #define HWRM_TF_EXT_EM_CFG 0x2deUL
+ #define HWRM_TF_EXT_EM_QCFG 0x2dfUL
+ #define HWRM_TF_TCAM_SET 0x2eeUL
+ #define HWRM_TF_TCAM_GET 0x2efUL
+ #define HWRM_TF_TCAM_MOVE 0x2f0UL
+ #define HWRM_TF_TCAM_FREE 0x2f1UL
+ #define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -356,6 +387,10 @@ struct cmd_nums {
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
#define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -429,8 +464,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 12
-#define HWRM_VERSION_STR "1.10.1.12"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.1.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -482,6 +517,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -647,6 +683,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1089,7 +1126,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:640b/80B) */
+/* hwrm_func_qcaps_output (size:704b/88B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1126,6 +1163,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
#define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1146,7 +1187,12 @@ struct hwrm_func_qcaps_output {
__le32 max_flow_id;
__le32 max_hw_ring_grps;
__le16 max_sp_tx_rings;
- u8 unused_0;
+ u8 unused_0[2];
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ u8 unused_1[3];
u8 valid;
};
@@ -1161,7 +1207,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:704b/88B) */
+/* hwrm_func_qcfg_output (size:768b/96B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1267,7 +1313,11 @@ struct hwrm_func_qcfg_output {
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
- u8 unused_2[1];
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 unused_2[7];
u8 valid;
};
@@ -1420,9 +1470,10 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY
+ #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
u8 unused_0[5];
};
@@ -1456,6 +1507,53 @@ struct hwrm_func_qstats_output {
u8 valid;
};
+/* hwrm_func_qstats_ext_input (size:192b/24B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
__le16 req_type;
@@ -1808,7 +1906,7 @@ struct hwrm_func_backing_store_qcaps_output {
u8 ctx_kind_initializer;
__le32 rsvd;
__le16 rsvd1;
- u8 rsvd2;
+ u8 tqm_fp_rings_count;
u8 valid;
};
@@ -2231,7 +2329,17 @@ struct hwrm_error_recovery_qcfg_output {
#define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
__le32 reset_reg_val[16];
u8 delay_after_reset[16];
- u8 unused_1[7];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
u8 valid;
};
@@ -2934,7 +3042,11 @@ struct hwrm_port_qstats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3058,7 +3170,11 @@ struct hwrm_port_qstats_ext_input {
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[2];
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
@@ -3840,14 +3956,22 @@ struct hwrm_queue_pfcenable_qcfg_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
u8 unused_0[3];
u8 valid;
};
@@ -3860,14 +3984,22 @@ struct hwrm_queue_pfcenable_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
__le16 port_id;
u8 unused_0[2];
};
@@ -5287,7 +5419,11 @@ struct hwrm_ring_cmpl_ring_qaggint_params_input {
__le16 target_id;
__le64 resp_addr;
__le16 ring_id;
- u8 unused_0[6];
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
};
/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
@@ -7618,7 +7754,9 @@ struct hwrm_nvm_modify_input {
__le64 resp_addr;
__le64 host_src_addr;
__le16 dir_idx;
- u8 unused_0[2];
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
__le32 offset;
__le32 len;
u8 unused_1[4];
@@ -8027,4 +8165,18 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+};
+
#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index cea2f9958a1d..3a9a51f7063a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -645,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
req.mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 782ea0771221..0eef4f5e4a46 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1939,53 +1939,25 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
return 0;
}
-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
-{
- switch (type) {
- case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
{
return netif_is_vxlan(netdev);
}
-static int bnxt_tc_indr_block_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
{
- struct net_device *netdev;
- struct bnxt *bp;
- int rc;
-
- netdev = netdev_notifier_info_to_dev(ptr);
if (!bnxt_is_netdev_indr_offload(netdev))
- return NOTIFY_OK;
-
- bp = container_of(nb, struct bnxt, tc_netdev_nb);
+ return -EOPNOTSUPP;
- switch (event) {
- case NETDEV_REGISTER:
- rc = __flow_indr_block_cb_register(netdev, bp,
- bnxt_tc_setup_indr_cb,
- bp);
- if (rc)
- netdev_info(bp->dev,
- "Failed to register indirect blk: dev: %s\n",
- netdev->name);
- break;
- case NETDEV_UNREGISTER:
- __flow_indr_block_cb_unregister(netdev,
- bnxt_tc_setup_indr_cb,
- bp);
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ default:
break;
}
- return NOTIFY_DONE;
+ return -EOPNOTSUPP;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
@@ -2074,8 +2046,8 @@ int bnxt_init_tc(struct bnxt *bp)
/* init indirect block notifications */
INIT_LIST_HEAD(&bp->tc_indr_block_list);
- bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
- rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+
+ rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp);
if (!rc)
return 0;
@@ -2101,7 +2073,8 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
- unregister_netdevice_notifier(&bp->tc_netdev_nb);
+ flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
+ bnxt_tc_setup_indr_block_cb);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 4a316c4b3fa8..8c8368c2f335 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -104,7 +104,13 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ if (BNXT_VF(bp))
+ ent[i].db_offset = DB_VF_OFFSET_P5;
+ } else {
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
}
}
@@ -475,6 +481,8 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
bp->edev = edev;
}
return bp->edev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 9895406b9830..6b4d2556a6df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -67,6 +67,14 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
};
struct bnxt_en_ops {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c6f6f2033880..5e3b4a3b69ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -138,6 +138,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
xdp.rxq = &rxr->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 61ab7d21f6bd..c5cca63b8571 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1918,7 +1918,6 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
if (ret) {
atomic_dec(&cp->iscsi_conn);
- ret = 0;
goto done;
}
ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 79636c78127c..ff31da0ed846 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2019 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -23,11 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include <net/arp.h>
#include <linux/mii.h>
@@ -70,6 +65,9 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
+/* Forward declarations */
+static void bcmgenet_set_rx_mode(struct net_device *dev);
+
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
/* MIPS chips strapped for BE will automagically configure the
@@ -461,6 +459,384 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
+ u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ return !!(reg & (1 << (f_index % 32)));
+}
+
+static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+}
+
+static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 offset, reg, reg1;
+
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
+}
+
+static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
+ u32 f_index, u32 rx_queue)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = f_index / 8;
+ reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
+ reg &= ~(0xF << (4 * (f_index % 8)));
+ reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
+ bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
+}
+
+static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
+ u32 f_index, u32 f_length)
+{
+ u32 offset;
+ u32 reg;
+
+ offset = HFB_FLT_LEN_V3PLUS +
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
+ sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg &= ~(0xFF << (8 * (f_index % 4)));
+ reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+}
+
+static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
+{
+ u32 f_index;
+
+ /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
+ for (f_index = MAX_NUM_OF_FS_RULES;
+ f_index < priv->hw_params->hfb_filter_cnt; f_index++)
+ if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
+ return f_index;
+
+ return -ENOMEM;
+}
+
+static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
+{
+ while (size) {
+ switch (*(unsigned char *)mask++) {
+ case 0x00:
+ case 0x0f:
+ case 0xf0:
+ case 0xff:
+ size--;
+ continue;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#define VALIDATE_MASK(x) \
+ bcmgenet_hfb_validate_mask(&(x), sizeof(x))
+
+static int bcmgenet_hfb_insert_data(u32 *f, int offset,
+ void *val, void *mask, size_t size)
+{
+ int index;
+ u32 tmp;
+
+ index = offset / 2;
+ tmp = f[index];
+
+ while (size--) {
+ if (offset++ & 1) {
+ tmp &= ~0x300FF;
+ tmp |= (*(unsigned char *)val++);
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0x30000;
+ break;
+ case 0xF0:
+ tmp |= 0x20000;
+ break;
+ case 0x0F:
+ tmp |= 0x10000;
+ break;
+ }
+ f[index++] = tmp;
+ if (size)
+ tmp = f[index];
+ } else {
+ tmp &= ~0xCFF00;
+ tmp |= (*(unsigned char *)val++) << 8;
+ switch ((*(unsigned char *)mask++)) {
+ case 0xFF:
+ tmp |= 0xC0000;
+ break;
+ case 0xF0:
+ tmp |= 0x80000;
+ break;
+ case 0x0F:
+ tmp |= 0x40000;
+ break;
+ }
+ if (!size)
+ f[index] = tmp;
+ }
+ }
+
+ return 0;
+}
+
+static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue, int f_index)
+{
+ u32 base = f_index * priv->hw_params->hfb_filter_size;
+ int i;
+
+ for (i = 0; i < f_length; i++)
+ bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
+
+ bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
+}
+
+static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+ struct bcmgenet_rxnfc_rule *rule)
+{
+ struct ethtool_rx_flow_spec *fs = &rule->fs;
+ int err = 0, offset = 0, f_length = 0;
+ u16 val_16, mask_16;
+ u8 val_8, mask_8;
+ size_t size;
+ u32 *f_data;
+
+ f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
+ GFP_KERNEL);
+ if (!f_data)
+ return -ENOMEM;
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_ext.h_dest, &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->m_ext.vlan_etype ||
+ fs->m_ext.vlan_tci) {
+ bcmgenet_hfb_insert_data(f_data, 12,
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmgenet_hfb_insert_data(f_data, 14,
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
+ }
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN, 2);
+ bcmgenet_hfb_insert_data(f_data, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+ break;
+ case IP_USER_FLOW:
+ f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
+ /* Specify IP Ether Type */
+ val_16 = htons(ETH_P_IP);
+ mask_16 = 0xFFFF;
+ bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmgenet_hfb_insert_data(f_data, 15 + offset,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmgenet_hfb_insert_data(f_data, 23 + offset,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmgenet_hfb_insert_data(f_data, 26 + offset,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmgenet_hfb_insert_data(f_data, 30 + offset,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only supports 20 byte IPv4 header */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+ &val_8, &mask_8,
+ sizeof(val_8));
+ size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
+ bcmgenet_hfb_insert_data(f_data,
+ ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ size);
+ f_length += DIV_ROUND_UP(size, 2);
+ break;
+ }
+
+ if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
+ /* Ring 0 flows can be handled by the default Descriptor Ring.
+ * We'll map them to ring 0, but don't enable the filter.
+ */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
+ fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_DISABLED;
+ } else {
+ /* Other Rx rings are direct mapped here */
+ bcmgenet_hfb_set_filter(priv, f_data, f_length,
+ fs->ring_cookie, fs->location);
+ bcmgenet_hfb_enable_filter(priv, fs->location);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
+ }
+
+ kfree(f_data);
+
+ return err;
+}
+
+/* bcmgenet_hfb_add_filter
+ *
+ * Add new filter to Hardware Filter Block to match and direct Rx traffic to
+ * desired Rx queue.
+ *
+ * f_data is an array of unsigned 32-bit integers where each 32-bit integer
+ * provides filter data for 2 bytes (4 nibbles) of Rx frame:
+ *
+ * bits 31:20 - unused
+ * bit 19 - nibble 0 match enable
+ * bit 18 - nibble 1 match enable
+ * bit 17 - nibble 2 match enable
+ * bit 16 - nibble 3 match enable
+ * bits 15:12 - nibble 0 data
+ * bits 11:8 - nibble 1 data
+ * bits 7:4 - nibble 2 data
+ * bits 3:0 - nibble 3 data
+ *
+ * Example:
+ * In order to match:
+ * - Ethernet frame type = 0x0800 (IP)
+ * - IP version field = 4
+ * - IP protocol field = 0x11 (UDP)
+ *
+ * The following filter is needed:
+ * u32 hfb_filter_ipv4_udp[] = {
+ * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
+ * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
+ * };
+ *
+ * To add the filter to HFB and direct the traffic to Rx queue 0, call:
+ * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
+ * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ */
+int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
+ u32 f_length, u32 rx_queue)
+{
+ int f_index;
+
+ f_index = bcmgenet_hfb_find_unused_filter(priv);
+ if (f_index < 0)
+ return -ENOMEM;
+
+ if (f_length > priv->hw_params->hfb_filter_size)
+ return -EINVAL;
+
+ bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
+ bcmgenet_hfb_enable_filter(priv, f_index);
+
+ return 0;
+}
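
To make the nibble encoding documented above concrete, here is a minimal standalone sketch (illustration only; the helper name is hypothetical and nothing below is part of the driver) that packs two payload bytes and their byte masks into one HFB word using the same bit layout as bcmgenet_hfb_insert_data():

#include <stdint.h>
#include <stdio.h>

/* b0 sits at the even frame offset (nibbles 0/1, data bits 15:8),
 * b1 at the odd offset (nibbles 2/3, data bits 7:0); bits 19:16 are
 * the per-nibble match enables.
 */
static uint32_t hfb_pack_word(uint8_t b0, uint8_t m0, uint8_t b1, uint8_t m1)
{
	uint32_t w = ((uint32_t)b0 << 8) | b1;

	if (m0 & 0xf0)
		w |= 0x80000;	/* enable nibble 0 */
	if (m0 & 0x0f)
		w |= 0x40000;	/* enable nibble 1 */
	if (m1 & 0xf0)
		w |= 0x20000;	/* enable nibble 2 */
	if (m1 & 0x0f)
		w |= 0x10000;	/* enable nibble 3 */
	return w;
}

int main(void)
{
	/* EtherType 0x0800, fully matched: prints 0x000f0800, the word
	 * used at frame offset 0x0c in hfb_filter_ipv4_udp[] above.
	 */
	printf("0x%08x\n", hfb_pack_word(0x08, 0xff, 0x00, 0xff));
	return 0;
}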
+
+/* bcmgenet_hfb_clear
+ *
+ * Clear Hardware Filter Block and disable all filtering.
+ */
+static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
+{
+ u32 i;
+
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
+
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0x0, i);
+
+ for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
+ bcmgenet_hfb_reg_writel(priv, 0x0,
+ HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt *
+ priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+}
+
+static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
+ INIT_LIST_HEAD(&priv->rxnfc_list);
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
+ priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
+ }
+
+ bcmgenet_hfb_clear(priv);
+}
+
static int bcmgenet_begin(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1045,6 +1421,229 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_set_eee(dev->phydev, e);
}
+static int bcmgenet_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ netdev_err(dev, "rxnfc: Invalid location (%d)\n",
+ cmd->fs.location);
+ return -EINVAL;
+ }
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case IP_USER_FLOW:
+ l4_mask = &cmd->fs.m_u.usr_ip4_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(l4_mask->ip4src) ||
+ VALIDATE_MASK(l4_mask->ip4dst) ||
+ VALIDATE_MASK(l4_mask->l4_4_bytes) ||
+ VALIDATE_MASK(l4_mask->proto) ||
+ VALIDATE_MASK(l4_mask->ip_ver) ||
+ VALIDATE_MASK(l4_mask->tos)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(eth_mask->h_dest) ||
+ VALIDATE_MASK(eth_mask->h_source) ||
+ VALIDATE_MASK(eth_mask->h_proto)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
+ cmd->fs.flow_type);
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
+ VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
+ netdev_err(dev, "rxnfc: user-def not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
+ /* don't allow mask which isn't valid */
+ if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
+ netdev_err(dev, "rxnfc: Unsupported mask\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int bcmgenet_insert_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *loc_rule;
+ int err;
+
+ if (priv->hw_params->hfb_filter_size < 128) {
+ netdev_err(dev, "rxnfc: Not supported by this device\n");
+ return -EINVAL;
+ }
+
+ if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+
+ err = bcmgenet_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&loc_rule->list);
+ loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memcpy(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
+ if (err) {
+ netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
+ err);
+ return err;
+ }
+
+ list_add_tail(&loc_rule->list, &priv->rxnfc_list);
+
+ return 0;
+}
+
+static int bcmgenet_delete_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ list_del(&rule->list);
+ rule->state = BCMGENET_RXNFC_STATE_UNUSED;
+ memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
+
+out:
+ return err;
+}
+
+static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = bcmgenet_insert_flow(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = bcmgenet_delete_flow(dev, cmd);
+ break;
+ default:
+ netdev_warn(priv->dev, "Unsupported ethtool command (%d)\n",
+ cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->rxnfc_rules[loc];
+ if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
+ err = -ENOENT;
+ else
+ memcpy(&cmd->fs, &rule->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+ return err;
+}
+
+static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
+{
+ struct list_head *pos;
+ int res = 0;
+
+ list_for_each(pos, &priv->rxnfc_list)
+ res++;
+
+ return res;
+}
+
+static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ int err = 0;
+ int i = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->hw_params->rx_queues ?: 1;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmgenet_get_num_flows(priv);
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (i < cmd->rule_cnt)
+ rule_locs[i++] = rule->fs.location;
+ cmd->rule_cnt = i;
+ cmd->data = MAX_NUM_OF_FS_RULES;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
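With get_rxnfc/set_rxnfc wired into the ethtool_ops below, the rules can be exercised entirely from userspace. A hypothetical session (interface name and addresses are examples only):

	# steer frames from a given source MAC to Rx queue 1, rule slot 0
	ethtool -N eth0 flow-type ether src 00:10:18:00:00:01 action 1 loc 0
	# show rule count and installed rules
	ethtool -n eth0
	# delete rule slot 0
	ethtool -N eth0 delete 0
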
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
@@ -1069,6 +1668,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_link_ksettings = bcmgenet_get_link_ksettings,
.set_link_ksettings = bcmgenet_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_rxnfc = bcmgenet_get_rxnfc,
+ .set_rxnfc = bcmgenet_set_rxnfc,
};
/* Power down the unimac, based on mode. */
@@ -2669,10 +3270,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
- struct bcmgenet_priv *priv = dev_id;
-
- pm_wakeup_event(&priv->pdev->dev, 0);
-
+ /* Acknowledge the interrupt */
return IRQ_HANDLED;
}
@@ -2710,9 +3308,8 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
unsigned char *addr)
{
- bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
}
static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
@@ -2721,13 +3318,9 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
u32 addr_tmp;
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
- addr[0] = addr_tmp >> 24;
- addr[1] = (addr_tmp >> 16) & 0xff;
- addr[2] = (addr_tmp >> 8) & 0xff;
- addr[3] = addr_tmp & 0xff;
+ put_unaligned_be32(addr_tmp, &addr[0]);
addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
- addr[4] = (addr_tmp >> 8) & 0xff;
- addr[5] = addr_tmp & 0xff;
+ put_unaligned_be16(addr_tmp, &addr[4]);
}
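
The unaligned helpers make the register layout explicit: UMAC_MAC0 holds the first four address bytes big-endian and UMAC_MAC1 the last two. With a made-up address (sketch, assuming asm/unaligned.h is available):

	u8 addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };

	u32 mac0 = get_unaligned_be32(&addr[0]);	/* 0x001018ab -> UMAC_MAC0 */
	u16 mac1 = get_unaligned_be16(&addr[4]);	/* 0xcdef -> UMAC_MAC1 */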
/* Returns a reusable dma control register value */
@@ -2766,43 +3359,12 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-/* bcmgenet_hfb_clear
- *
- * Clear Hardware Filter Block and disable all filtering.
- */
-static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
-{
- u32 i;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
-
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
-
- for (i = 0; i < priv->hw_params->hfb_filter_cnt *
- priv->hw_params->hfb_filter_size; i++)
- bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
-}
-
-static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
-{
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_clear(priv);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ bcmgenet_set_rx_mode(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3421,8 +3983,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
- struct device_node *dn = pdev->dev.of_node;
- const struct of_device_id *of_id = NULL;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -3437,12 +3997,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (dn) {
- of_id = of_match_node(bcmgenet_match, dn);
- if (!of_id)
- return -EINVAL;
- }
-
priv = netdev_priv(dev);
priv->irq0 = platform_get_irq(pdev, 0);
if (priv->irq0 < 0) {
@@ -3504,13 +4058,16 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
}
- priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
- priv->clk = NULL;
+ err = PTR_ERR(priv->clk);
+ goto err;
}
- clk_prepare_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err;
bcmgenet_set_hw_params(priv);
@@ -3528,16 +4085,18 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->rx_buf_len = RX_BUF_LENGTH;
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
- priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
- priv->clk_wol = NULL;
+ err = PTR_ERR(priv->clk_wol);
+ goto err;
}
- priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
+ priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
- priv->clk_eee = NULL;
+ err = PTR_ERR(priv->clk_eee);
+ goto err;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
@@ -3546,7 +4105,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if ((pd) && (!IS_ERR_OR_NULL(pd->mac_address)))
+ if (pd && !IS_ERR_OR_NULL(pd->mac_address))
ether_addr_copy(dev->dev_addr, pd->mac_address);
else
if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
@@ -3612,11 +4171,10 @@ static void bcmgenet_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_resume(struct device *d)
+static int bcmgenet_resume_noirq(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
u32 reg;
@@ -3628,6 +4186,38 @@ static int bcmgenet_resume(struct device *d)
if (ret)
return ret;
+ if (device_may_wakeup(d) && priv->wolopts) {
+ /* Account for Wake-on-LAN events and clear those events
+ * (Some devices need more time between enabling the clocks
+ * and the interrupt register reflecting the wake event so
+ * read the register twice)
+ */
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+ if (reg & UMAC_IRQ_WAKE_EVENT)
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ }
+
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+
+ return 0;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 offset, reg;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (device_may_wakeup(d) && priv->wolopts)
+ bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
@@ -3638,10 +4228,6 @@ static int bcmgenet_resume(struct device *d)
init_umac(priv);
- /* From WOL-enabled suspend, switch to regular clock */
- if (priv->wolopts)
- clk_disable_unprepare(priv->clk_wol);
-
phy_init_hw(dev->phydev);
/* Speed settings must be restored */
@@ -3653,15 +4239,17 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- if (priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -3698,7 +4286,7 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
+ u32 offset;
if (!netif_running(dev))
return 0;
@@ -3710,25 +4298,53 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
+ /* Preserve filter state and disable filtering */
+ priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
+ priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_suspend_noirq(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
+ if (device_may_wakeup(d) && priv->wolopts)
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
+ else if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
+
+ /* Let the framework handle resumption and leave the clocks on */
+ if (ret)
+ return ret;
/* Turn off the clocks */
clk_disable_unprepare(priv->clk);
- if (ret)
- bcmgenet_resume(d);
-
- return ret;
+ return 0;
}
+#else
+#define bcmgenet_suspend NULL
+#define bcmgenet_suspend_noirq NULL
+#define bcmgenet_resume NULL
+#define bcmgenet_resume_noirq NULL
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct dev_pm_ops bcmgenet_pm_ops = {
+ .suspend = bcmgenet_suspend,
+ .suspend_noirq = bcmgenet_suspend_noirq,
+ .resume = bcmgenet_resume,
+ .resume_noirq = bcmgenet_resume_noirq,
+};
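
Splitting the PM callbacks this way lets the wake-event bookkeeping run while device interrupts are still disabled. Simplified, the sequence over a sleep cycle is:

	suspend()       save HFB state, quiesce the interface
	suspend_noirq() arm WoL or power down, gate the clocks
	  ...system asleep...
	resume_noirq()  ungate clocks, read INTRL2_CPU_STAT, report wake events
	resume()        power up, reinit UMAC, restore HFB state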
static const struct acpi_device_id genet_acpi_match[] = {
{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
@@ -3744,7 +4360,7 @@ static struct platform_driver bcmgenet_driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
- .acpi_match_table = ACPI_PTR(genet_acpi_match),
+ .acpi_match_table = genet_acpi_match,
},
};
module_platform_driver(bcmgenet_driver);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index daf8fb2c39b6..a12cb59298f4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -14,6 +14,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/dim.h>
+#include <linux/ethtool.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -31,6 +32,7 @@
#define DMA_MAX_BURST_LENGTH 0x10
/* misc. configuration */
+#define MAX_NUM_OF_FS_RULES 16
#define CLEAR_ALL_HFB 0xFF
#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
#define DMA_FC_THRESH_LO 5
@@ -310,6 +312,8 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_HFB_SM (1 << 10)
#define UMAC_IRQ_HFB_MM (1 << 11)
#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \
+ UMAC_IRQ_MPD_R)
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
@@ -608,6 +612,18 @@ struct bcmgenet_rx_ring {
struct bcmgenet_priv *priv;
};
+enum bcmgenet_rxnfc_state {
+ BCMGENET_RXNFC_STATE_UNUSED = 0,
+ BCMGENET_RXNFC_STATE_DISABLED,
+ BCMGENET_RXNFC_STATE_ENABLED
+};
+
+struct bcmgenet_rxnfc_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec fs;
+ enum bcmgenet_rxnfc_state state;
+};
+
/* device context */
struct bcmgenet_priv {
void __iomem *base;
@@ -626,6 +642,8 @@ struct bcmgenet_priv {
struct enet_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_buf_len;
+ struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
+ struct list_head rxnfc_list;
struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
@@ -676,6 +694,9 @@ struct bcmgenet_priv {
/* WOL */
struct clk *clk_wol;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+ bool wol_active;
+ u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index c9a43695b182..4ea6a26b04f7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2020 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,18 +41,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
- if (wol->wolopts & WAKE_MAGICSECURE) {
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
/* ethtool function - set WOL (Wake on LAN) settings.
@@ -62,25 +57,15 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- u32 reg;
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
- if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER))
return -EINVAL;
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (wol->wolopts & WAKE_MAGICSECURE) {
- bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_MPD_PW_MS);
- bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_MPD_PW_LS);
- reg |= MPD_PW_EN;
- } else {
- reg &= ~MPD_PW_EN;
- }
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -120,12 +105,21 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
return retries;
}
+static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv)
+{
+ bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_MPD_PW_MS);
+ bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_MPD_PW_LS);
+}
+
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_ctrl_reg, hfb_enable = 0;
int retries = 0;
- u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
@@ -142,22 +136,48 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
mdelay(10);
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg |= MPD_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ reg |= MPD_EN;
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ bcmgenet_set_mpd_password(priv);
+ reg |= MPD_PW_EN;
+ }
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ }
+
+ hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ hfb_enable |= (1 << rule->fs.location);
+ reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
if (retries < 0) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
return retries;
}
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
+ clk_prepare_enable(priv->clk_wol);
+ priv->wol_active = 1;
+
+ if (hfb_enable) {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
+ }
+
/* Enable CRC forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
@@ -173,6 +193,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
+ reg = UMAC_IRQ_MPD_R;
+ if (hfb_enable)
+ reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
+
+ bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
+
return 0;
}
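
A wake-up rule is an ordinary rxnfc rule whose action is RX_CLS_FLOW_WAKE. A hypothetical example, assuming an ethtool build that maps action -2 to RX_CLS_FLOW_WAKE (addresses illustrative):

	# wake on IPv4 packets for 192.168.1.10, rule slot 2
	ethtool -N eth0 flow-type ip4 dst-ip 192.168.1.10 action -2 loc 2
	# arm filter wake-up ('f'), optionally alongside magic packet ('g')
	ethtool -s eth0 wol fg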
@@ -186,12 +212,22 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
return;
}
+ if (!priv->wol_active)
+ return; /* failed to suspend so skip the rest */
+
+ priv->wol_active = 0;
+ clk_disable_unprepare(priv->clk_wol);
+
+ /* Disable Magic Packet Detection */
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- if (!(reg & MPD_EN))
- return; /* already powered up so skip the rest */
- reg &= ~MPD_EN;
+ reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ /* Disable WAKE_FILTER Detection */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff98a82b7bc4..7a3b22b35238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10797,17 +10797,15 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
+ u32 off, len = TG3_OCIR_LEN;
int i;
- for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
- u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
-
+ for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
- off += len;
if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
!(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
- memset(ocir, 0, TG3_OCIR_LEN);
+ memset(ocir, 0, len);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index d7e805749a5b..e40c64b79f66 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -782,7 +782,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
if ((work_done < budget && tx_done) ||
(iq && iq->pkt_in_done >= MAX_REG_CNT) ||
(droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
napi_complete_done(napi, work_done);
octeon_enable_irq(droq->oct_dev, droq->q_no);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 3d01d3602d8f..fb380b4f3e02 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -712,18 +712,6 @@ struct octeon_device *lio_get_device(u32 octeon_id);
*/
int lio_get_device_id(void *dev);
-static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
-{
- u16 rev = (oct->rev_id & 0xC) >> 2;
-
- return (rev == 0) ? 1 : rev;
-}
-
-static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
-{
- return oct->rev_id & 0x3;
-}
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 9d868403d86c..cbaa1924afbe 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -234,6 +234,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
/* Put it in the ring. */
p->rx_ring[p->rx_next_fill] = re.d64;
+ /* Make sure there is no reordering between filling the ring and
+ * ringing the bell
+ */
+ wmb();
+
dma_sync_single_for_device(p->dev, p->rx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
DMA_BIDIRECTIONAL);
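
The wmb() added above is the standard descriptor-before-doorbell ordering idiom: the CPU must not let the bell ring become visible before the descriptor write. A generic producer-side sketch (names and the userspace barrier are illustrative, not from this driver):

#include <stdint.h>

struct ring {
	volatile uint64_t *descs;	/* descriptor array shared with HW */
	volatile uint32_t *doorbell;	/* MMIO doorbell register */
	unsigned int next;
	unsigned int size;
};

static void ring_post(struct ring *r, uint64_t desc)
{
	r->descs[r->next] = desc;		/* 1: publish the descriptor */
	r->next = (r->next + 1) % r->size;
	__sync_synchronize();			/* 2: full barrier; wmb() in kernel code */
	*r->doorbell = r->next;			/* 3: only now ring the bell */
}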
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b4b33368698f..2ba0ce115e63 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -552,6 +552,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &rq->xdp_rxq;
+ xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 9909bfda167e..82cdfa51ce37 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -26,7 +26,7 @@ config CHELSIO_T1
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
performance tuning is in
- <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>.
+ <file:Documentation/networking/device_drivers/chelsio/cxgb.rst>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e46a14f44a6f..cf69c6edcfec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -60,6 +60,7 @@
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
extern struct list_head adapter_list;
+extern struct list_head uld_list;
extern struct mutex uld_mutex;
/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
@@ -466,8 +467,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
-#include "t4fw_api.h"
-
#define FW_VERSION(chip) ( \
FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
@@ -824,6 +823,13 @@ struct sge_uld_txq_info {
u16 ntxq; /* # of egress uld queues */
};
+/* struct to maintain ULD list to reallocate ULD resources on hotplug */
+struct cxgb4_uld_list {
+ struct cxgb4_uld_info uld_info;
+ struct list_head list_node;
+ enum cxgb4_uld uld_type;
+};
+
enum sge_eosw_state {
CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
@@ -1093,6 +1099,7 @@ struct adapter {
/* TC u32 offload */
struct cxgb4_tc_u32_table *tc_u32;
+ struct chcr_ktls chcr_ktls;
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
@@ -1127,19 +1134,20 @@ struct adapter {
* programmed with various parameters.
*/
struct ch_sched_params {
- s8 type; /* packet or flow */
+ u8 type; /* packet or flow */
union {
struct {
- s8 level; /* scheduler hierarchy level */
- s8 mode; /* per-class or per-flow */
- s8 rateunit; /* bit or packet rate */
- s8 ratemode; /* %port relative or kbps absolute */
- s8 channel; /* scheduler channel [0..N] */
- s8 class; /* scheduler class [0..N] */
- s32 minrate; /* minimum rate */
- s32 maxrate; /* maximum rate */
- s16 weight; /* percent weight */
- s16 pktsize; /* average packet size */
+ u8 level; /* scheduler hierarchy level */
+ u8 mode; /* per-class or per-flow */
+ u8 rateunit; /* bit or packet rate */
+ u8 ratemode; /* %port relative or kbps absolute */
+ u8 channel; /* scheduler channel [0..N] */
+ u8 class; /* scheduler class [0..N] */
+ u32 minrate; /* minimum rate */
+ u32 maxrate; /* maximum rate */
+ u16 weight; /* percent weight */
+ u16 pktsize; /* average packet size */
+ u16 burstsize; /* burst buffer size */
} params;
} u;
};
@@ -1954,9 +1962,10 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
enum ctxt_type ctype, u32 *data);
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
enum ctxt_type ctype, u32 *data);
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize);
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma);
@@ -2052,4 +2061,7 @@ int cxgb_open(struct net_device *dev);
int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
+#endif
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index ebed99f3d4cf..41315712deb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -49,6 +49,7 @@
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#include "cxgb4_tc_mqprio.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1812,12 +1813,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
/* Inner header lookup */
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
seq_printf(seq,
- "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx %06x %06x - - %3c"
- " 'I' %4x "
- "%3c %#x%4u%4d", idx, addr[0],
- addr[1], addr[2], addr[3],
- addr[4], addr[5],
+ "%3u %pM %012llx %06x %06x - - %3c 'I' %4x %3c %#x%4u%4d",
+ idx, addr,
(unsigned long long)mask,
vniy, (vnix | vniy),
dip_hit ? 'Y' : 'N',
@@ -1829,10 +1826,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
T6_VF_G(cls_lo) : -1);
} else {
seq_printf(seq,
- "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx - - ",
- idx, addr[0], addr[1], addr[2],
- addr[3], addr[4], addr[5],
+ "%3u %pM %012llx - - ",
+ idx, addr,
(unsigned long long)mask);
if (vlan_vld)
@@ -1850,10 +1845,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
T6_VF_G(cls_lo) : -1);
}
} else
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
- "%012llx%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5], (unsigned long long)mask,
+ seq_printf(seq, "%3u %pM %012llx%3c %#x%4u%4d",
+ idx, addr, (unsigned long long)mask,
(cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
PF_G(cls_lo),
@@ -2657,32 +2650,19 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
- int eth_entries, ctrl_entries, eo_entries = 0;
+ int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
const struct sge_uld_txq_info *utxq_info;
const struct sge_uld_rxq_info *urxq_info;
+ struct cxgb4_tc_port_mqprio *port_mqprio;
struct adapter *adap = seq->private;
- int i, n, r = (uintptr_t)v - 1;
+ int i, j, n, r = (uintptr_t)v - 1;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- if (adap->sge.eohw_txq)
- eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
-
- mutex_lock(&uld_mutex);
- if (s->uld_txq_info)
- for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
- uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
-
- if (s->uld_rxq_info) {
- for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
- uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
- uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
- }
- }
if (r)
seq_putc(seq, '\n');
@@ -2759,11 +2739,21 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ goto out;
}
r -= eth_entries;
- if (r < eo_entries) {
+ if (!adap->tc_mqprio)
+ goto skip_mqprio;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto skip_mqprio;
+ }
+
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+ if (r < eohw_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
@@ -2808,10 +2798,71 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eohw_entries;
+ for (j = 0; j < adap->params.nports; j++) {
+ int entries;
+ u8 tc;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[j];
+ entries = 0;
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (!entries)
+ continue;
+
+ eosw_entries = DIV_ROUND_UP(entries, 4);
+ if (r < eosw_entries) {
+ const struct sge_eosw_txq *tx;
+
+ n = min(4, entries - 4 * r);
+ tx = &port_mqprio->eosw_txq[4 * r];
+
+ S("QType:", "EOSW-TXQ");
+ S("Interface:",
+ adap->port[j] ? adap->port[j]->name : "N/A");
+ T("EOTID:", hwtid);
+ T("HWQID:", hwqid);
+ T("State:", state);
+ T("Size:", ndesc);
+ T("In-Use:", inuse);
+ T("Credits:", cred);
+ T("Compl:", ncompl);
+ T("Last-Compl:", last_compl);
+ T("PIDX:", pidx);
+ T("Last-PIDX:", last_pidx);
+ T("CIDX:", cidx);
+ T("Last-CIDX:", last_cidx);
+ T("FLOWC-IDX:", flowc_idx);
+
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eosw_entries;
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+
+skip_mqprio:
+ if (!is_uld(adap))
+ goto skip_uld;
+
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
}
- r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
@@ -2994,6 +3045,9 @@ do { \
}
r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ mutex_unlock(&uld_mutex);
+
+skip_uld:
if (r < ctrl_entries) {
const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
@@ -3008,7 +3062,7 @@ do { \
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- goto unlock;
+ goto out;
}
r -= ctrl_entries;
@@ -3026,11 +3080,9 @@ do { \
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
s->counter_val[evtq->pktcnt_idx]);
- goto unlock;
+ goto out;
}
-unlock:
- mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
@@ -3039,13 +3091,38 @@ unlock:
#undef R3
#undef T3
#undef S3
+out:
+ return 0;
+
+unlock:
+ mutex_unlock(&uld_mutex);
return 0;
}
static int sge_queue_entries(const struct adapter *adap)
{
- int tot_uld_entries = 0;
- int i;
+ int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+
+ if (adap->tc_mqprio) {
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 tc;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (adap->sge.eohw_txq)
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+
+ for (i = 0; i < adap->params.nports; i++) {
+ u32 entries = 0;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (entries)
+ eosw_entries += DIV_ROUND_UP(entries, 4);
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ }
if (!is_uld(adap))
goto lld_only;
@@ -3062,8 +3139,7 @@ static int sge_queue_entries(const struct adapter *adap)
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
- tot_uld_entries +
+ eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -3244,6 +3320,10 @@ static int tid_info_show(struct seq_file *seq, void *v)
if (t->nhpftids)
seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
t->hpftid_base + t->nhpftids - 1);
+ if (t->neotids)
+ seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
+ t->eotid_base, t->eotid_base + t->neotids - 1,
+ atomic_read(&t->eotids_in_use));
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
@@ -3411,6 +3491,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.tls_key));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+ seq_printf(seq, "Tx TLS offload refcount: %20u\n",
+ refcount_read(&adap->chcr_ktls.ktls_refcount));
seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
seq_printf(seq, "Tx connection created: %20llu\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a70018f067aa..854b1717a70d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,6 +66,9 @@
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#include <net/tls.h>
+#endif
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -180,6 +183,7 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+LIST_HEAD(uld_list);
static int cfg_queues(struct adapter *adap);
@@ -1579,6 +1583,7 @@ static int tid_init(struct tid_info *t)
atomic_set(&t->tids_in_use, 0);
atomic_set(&t->conns_in_use, 0);
atomic_set(&t->hash_tids_in_use, 0);
+ atomic_set(&t->eotids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -3021,7 +3026,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
SCHED_CLASS_RATEUNIT_BITS,
SCHED_CLASS_RATEMODE_ABS,
pi->tx_chan, class_id, 0,
- max_tx_rate * 1000, 0, pktsize);
+ max_tx_rate * 1000, 0, pktsize, 0);
if (ret) {
dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
ret);
@@ -6062,6 +6067,79 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
}
#endif /* CONFIG_PCI_IOV */
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+
+static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 tcp_sn)
+{
+ struct adapter *adap = netdev2adap(netdev);
+ int ret = 0;
+
+ mutex_lock(&uld_mutex);
+ if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
+ dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
+ dev_err(adap->pdev_dev,
+ "chcr driver has no registered tlsdev_ops()\n");
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
+ direction,
+ crypto_info,
+ tcp_sn);
+ /* if there is a failure, clear the refcount */
+ if (ret)
+ cxgb4_set_ktls_feature(adap,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct adapter *adap = netdev2adap(netdev);
+
+ mutex_lock(&uld_mutex);
+ if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
+ dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ goto out_unlock;
+ }
+
+ if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
+ dev_err(adap->pdev_dev,
+ "chcr driver has no registered tlsdev_ops\n");
+ goto out_unlock;
+ }
+
+ adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ direction);
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct tlsdev_ops cxgb4_ktls_ops = {
+ .tls_dev_add = cxgb4_ktls_dev_add,
+ .tls_dev_del = cxgb4_ktls_dev_del,
+};
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
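
For context, the tls_dev_add()/tls_dev_del() hooks registered above are invoked by the kernel TLS socket code. A minimal userspace sketch of how a connection reaches the offload path (generic kTLS API, not Chelsio-specific; key material is zeroed purely for illustration):

#include <linux/tls.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* fd is an established TCP socket on the cxgb4 netdev */
static int enable_ktls_tx(int fd)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	memset(&ci, 0, sizeof(ci));	/* real key/iv/salt/seq come from the handshake */
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	/* with NETIF_F_HW_TLS_TX set, this lands in cxgb4_ktls_dev_add() */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}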
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -6311,7 +6389,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
-
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &cxgb4_ktls_ops;
+ /* initialize the refcount */
+ refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
+ }
+#endif
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 81 - 9600 */
@@ -6518,11 +6603,8 @@ fw_attach_fail:
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_uld(adapter)) {
- mutex_lock(&uld_mutex);
- list_add_tail(&adapter->list_node, &adapter_list);
- mutex_unlock(&uld_mutex);
- }
+ if (is_uld(adapter))
+ cxgb4_uld_enable(adapter);
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index e6af4906d674..ae7123a9de8e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -342,6 +342,13 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);
+ /* Request larger burst buffer for smaller MTU, so
+ * that hardware can work on more data per burst
+ * cycle.
+ */
+ if (dev->mtu <= ETH_DATA_LEN)
+ p.u.params.burstsize = 8 * dev->mtu;
+
e = cxgb4_sched_class_alloc(dev, &p);
if (!e) {
ret = -ENOMEM;
@@ -567,6 +574,7 @@ static void cxgb4_mqprio_disable_offload(struct net_device *dev)
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
+ struct adapter *adap = netdev2adap(dev);
bool needs_bring_up = false;
int ret;
@@ -574,6 +582,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
if (ret)
return ret;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+
/* To configure tc params, the currently allocated EOTIDs must
* be freed up. However, they can't be freed up if there's
* traffic running on the interface. So, ensure interface is
@@ -609,6 +619,7 @@ out:
if (needs_bring_up)
cxgb_open(dev);
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
}
@@ -621,6 +632,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
return;
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
for_each_port(adap, i) {
dev = adap->port[i];
if (!dev)
@@ -632,6 +644,7 @@ void cxgb4_mqprio_stop_offload(struct adapter *adap)
cxgb4_mqprio_disable_offload(dev);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
int cxgb4_init_tc_mqprio(struct adapter *adap)
@@ -653,6 +666,8 @@ int cxgb4_init_tc_mqprio(struct adapter *adap)
goto out_free_mqprio;
}
+ mutex_init(&tc_mqprio->mqprio_mutex);
+
tc_mqprio->port_mqprio = tc_port_mqprio;
for (i = 0; i < adap->params.nports; i++) {
port_mqprio = &tc_mqprio->port_mqprio[i];
@@ -687,6 +702,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
u8 i;
if (adap->tc_mqprio) {
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
if (adap->tc_mqprio->port_mqprio) {
for (i = 0; i < adap->params.nports; i++) {
struct net_device *dev = adap->port[i];
@@ -698,6 +714,7 @@ void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
}
kfree(adap->tc_mqprio->port_mqprio);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
kfree(adap->tc_mqprio);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index ff8794132b22..be96f1dc0372 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -33,6 +33,7 @@ struct cxgb4_tc_port_mqprio {
struct cxgb4_tc_mqprio {
refcount_t refcnt; /* Refcount for adapter-wide resources */
+ struct mutex mqprio_mutex; /* Lock for accessing MQPRIO info */
struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index e65b52375dd8..0307e9c69a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -174,13 +174,14 @@ static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int i, ret = 0;
+ int i, ret;
- ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
+ ret = alloc_uld_rxqs(adap, rxq_info, lro);
+ if (ret)
+ return ret;
/* Tell uP to route control queue completions to rdma rspq */
- if (adap->flags & CXGB4_FULL_INIT_DONE &&
- !ret && uld_type == CXGB4_ULD_RDMA) {
+ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
struct sge *s = &adap->sge;
unsigned int cmplqid;
u32 param, cmdop;
@@ -662,25 +663,129 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+static bool cxgb4_uld_in_use(struct adapter *adap)
+{
+ const struct tid_info *t = &adap->tids;
+
+ return (atomic_read(&t->conns_in_use) || t->stids_in_use);
+}
+
#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
* @adap: adapter info
* @enable: 1 to enable / 0 to disable ktls settings.
*/
-static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
- u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
- FW_PARAMS_PARAM_Y_V(enable));
int ret = 0;
+ u32 params =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
+ FW_PARAMS_PARAM_Y_V(enable) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
+
+ if (enable) {
+ if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
+ /* If ULD connections are already up at this point, another
+ * ULD is active, so return failure.
+ */
+ if (cxgb4_uld_in_use(adap)) {
+ dev_warn(adap->pdev_dev,
+ "ULD connections (tid/stid) active. Can't enable kTLS\n");
+ return -EINVAL;
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &params, &params);
+ if (ret)
+ return ret;
+ refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
+ pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+ } else {
+ /* ktls settings already up, just increment refcount. */
+ refcount_inc(&adap->chcr_ktls.ktls_refcount);
+ }
+ } else {
+ /* return failure if refcount is already 0. */
+ if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
+ return -EINVAL;
+ /* Decrement the refcount and test: if it reaches 0, disable the
+ * ktls feature; otherwise report success.
+ */
+ if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &params, &params);
+ if (ret)
+ return ret;
+ pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+ }
+ }
- ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
- /* if fw returns failure, clear the ktls flag */
- if (ret)
- adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
+ return ret;
}
#endif
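The enable/disable paths above implement a first-on/last-off gate: only the first enable and the last disable issue the firmware mailbox call; everything in between just moves the refcount. A minimal user-space sketch of the same pattern (fw_set_ktls() is a hypothetical stand-in for the t4_set_params() mailbox call, and callers are assumed serialized, as in the driver):

	#include <errno.h>
	#include <stdatomic.h>

	static atomic_int ktls_users;

	/* hypothetical stub for the t4_set_params() firmware mailbox call */
	static int fw_set_ktls(int enable) { (void)enable; return 0; }

	static int ktls_enable_get(void)
	{
		if (atomic_load(&ktls_users) == 0) {
			int ret = fw_set_ktls(1);	/* first user enables the HW */

			if (ret)
				return ret;
			atomic_store(&ktls_users, 1);
		} else {
			atomic_fetch_add(&ktls_users, 1);
		}
		return 0;
	}

	static int ktls_disable_put(void)
	{
		if (atomic_load(&ktls_users) == 0)
			return -EINVAL;			/* already disabled */
		if (atomic_fetch_sub(&ktls_users, 1) == 1)
			return fw_set_ktls(0);		/* last user disables the HW */
		return 0;
	}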
+static void cxgb4_uld_alloc_resources(struct adapter *adap,
+ enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ return;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ return;
+ ret = cfg_queues_uld(adap, type, p);
+ if (ret)
+ goto out;
+ ret = setup_sge_queues_uld(adap, type, p->lro);
+ if (ret)
+ goto free_queues;
+ if (adap->flags & CXGB4_USING_MSIX) {
+ ret = request_msix_queue_irqs_uld(adap, type);
+ if (ret)
+ goto free_rxq;
+ }
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ enable_rx_uld(adap, type);
+ if (adap->uld[type].add)
+ goto free_irq;
+ ret = setup_sge_txq_uld(adap, type, p);
+ if (ret)
+ goto free_irq;
+ adap->uld[type] = *p;
+ ret = uld_attach(adap, type);
+ if (ret)
+ goto free_txq;
+ return;
+free_txq:
+ release_sge_txq_uld(adap, type);
+free_irq:
+ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & CXGB4_USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+free_rxq:
+ free_sge_queues_uld(adap, type);
+free_queues:
+ free_queues_uld(adap, type);
+out:
+ dev_warn(adap->pdev_dev,
+ "ULD registration failed for uld type %d\n", type);
+}
+
+void cxgb4_uld_enable(struct adapter *adap)
+{
+ struct cxgb4_uld_list *uld_entry;
+
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adap->list_node, &adapter_list);
+ list_for_each_entry(uld_entry, &uld_list, list_node)
+ cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
+ &uld_entry->uld_info);
+ mutex_unlock(&uld_mutex);
+}
+
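cxgb4_uld_enable() plus the new uld_list make ULD attach independent of probe order: an adapter probed after a ULD module loads replays uld_list, and a ULD registered after adapters probe walks adapter_list, both under uld_mutex. A hedged sketch of this symmetric registry pattern in kernel idiom (all names hypothetical):

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct drv_entry { struct list_head node; /* + ops */ };
	struct dev_entry { struct list_head node; /* + state */ };

	static LIST_HEAD(driver_list);
	static LIST_HEAD(device_list);
	static DEFINE_MUTEX(reg_mutex);

	static void attach(struct dev_entry *dev, struct drv_entry *drv);

	static void register_driver(struct drv_entry *drv)
	{
		struct dev_entry *dev;

		mutex_lock(&reg_mutex);
		/* bind to every device that probed before this driver loaded */
		list_for_each_entry(dev, &device_list, node)
			attach(dev, drv);
		list_add_tail(&drv->node, &driver_list);
		mutex_unlock(&reg_mutex);
	}

	static void register_device(struct dev_entry *dev)
	{
		struct drv_entry *drv;

		mutex_lock(&reg_mutex);
		list_add_tail(&dev->node, &device_list);
		/* replay every driver registered before this device probed */
		list_for_each_entry(drv, &driver_list, node)
			attach(dev, drv);
		mutex_unlock(&reg_mutex);
	}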
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
@@ -691,63 +796,23 @@ static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p)
{
+ struct cxgb4_uld_list *uld_entry;
struct adapter *adap;
- int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
+ uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
+ if (!uld_entry)
+ return;
+
+ memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node) {
- if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
- (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
- continue;
- if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
- continue;
- ret = cfg_queues_uld(adap, type, p);
- if (ret)
- goto out;
- ret = setup_sge_queues_uld(adap, type, p->lro);
- if (ret)
- goto free_queues;
- if (adap->flags & CXGB4_USING_MSIX) {
- ret = request_msix_queue_irqs_uld(adap, type);
- if (ret)
- goto free_rxq;
- }
- if (adap->flags & CXGB4_FULL_INIT_DONE)
- enable_rx_uld(adap, type);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- /* send mbox to enable ktls related settings. */
- if (type == CXGB4_ULD_CRYPTO &&
- (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
- cxgb4_set_ktls_feature(adap, 1);
-#endif
- if (adap->uld[type].add)
- goto free_irq;
- ret = setup_sge_txq_uld(adap, type, p);
- if (ret)
- goto free_irq;
- adap->uld[type] = *p;
- ret = uld_attach(adap, type);
- if (ret)
- goto free_txq;
- continue;
-free_txq:
- release_sge_txq_uld(adap, type);
-free_irq:
- if (adap->flags & CXGB4_FULL_INIT_DONE)
- quiesce_rx_uld(adap, type);
- if (adap->flags & CXGB4_USING_MSIX)
- free_msix_queue_irqs_uld(adap, type);
-free_rxq:
- free_sge_queues_uld(adap, type);
-free_queues:
- free_queues_uld(adap, type);
-out:
- dev_warn(adap->pdev_dev,
- "ULD registration failed for uld type %d\n", type);
- }
+ list_for_each_entry(adap, &adapter_list, list_node)
+ cxgb4_uld_alloc_resources(adap, type, p);
+
+ uld_entry->uld_type = type;
+ list_add_tail(&uld_entry->list_node, &uld_list);
mutex_unlock(&uld_mutex);
return;
}
@@ -761,6 +826,7 @@ EXPORT_SYMBOL(cxgb4_register_uld);
*/
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
+ struct cxgb4_uld_list *uld_entry, *tmp;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
@@ -775,13 +841,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
cxgb4_shutdown_uld_adapter(adap, type);
+ }
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- /* send mbox to disable ktls related settings. */
- if (type == CXGB4_ULD_CRYPTO &&
- (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
- cxgb4_set_ktls_feature(adap, 0);
-#endif
+ list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
+ if (uld_entry->uld_type == type) {
+ list_del(&uld_entry->list_node);
+ kfree(uld_entry);
+ }
}
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index be831317520a..dbce99b209d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -147,6 +147,9 @@ struct tid_info {
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
atomic_t conns_in_use;
+ /* ETHOFLD TIDs used for rate limiting */
+ atomic_t eotids_in_use;
+
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
@@ -221,12 +224,14 @@ static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data)
{
set_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = data;
+ atomic_inc(&t->eotids_in_use);
}
static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid)
{
clear_bit(eotid, t->eotid_bmap);
t->eotid_tab[eotid].data = NULL;
+ atomic_dec(&t->eotids_in_use);
}
int cxgb4_alloc_atid(struct tid_info *t, void *data);
@@ -263,6 +268,10 @@ struct filter_ctx {
u32 tid; /* to store tid */
};
+struct chcr_ktls {
+ refcount_t ktls_refcount;
+};
+
struct ch_filter_specification;
int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
@@ -322,6 +331,7 @@ enum cxgb4_control {
CXGB4_CONTROL_DB_DROP,
};
+struct adapter;
struct pci_dev;
struct l2t_data;
struct net_device;
@@ -458,8 +468,12 @@ struct cxgb4_uld_info {
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct tlsdev_ops *tlsdev_ops;
+#endif
};
+void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cebe1412d960..fde93c50cfec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -57,7 +57,8 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
p->u.params.ratemode,
p->u.params.channel, e->idx,
p->u.params.minrate, p->u.params.maxrate,
- p->u.params.weight, p->u.params.pktsize);
+ p->u.params.weight, p->u.params.pktsize,
+ p->u.params.burstsize);
break;
default:
err = -ENOTSUPP;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6516c45864b3..1359158652b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2091,10 +2091,9 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
return flits + nsgl;
}
-static inline void *write_eo_wr(struct adapter *adap,
- struct sge_eosw_txq *eosw_txq,
- struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
- u32 hdr_len, u32 wrlen)
+static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
{
const struct skb_shared_info *ssi = skb_shinfo(skb);
struct cpl_tx_pkt_core *cpl;
@@ -2113,7 +2112,8 @@ static inline void *write_eo_wr(struct adapter *adap,
immd_len += hdr_len;
if (!eosw_txq->ncompl ||
- eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ (eosw_txq->last_compl + wrlen16) >=
+ (adap->params.ofldq_wr_cred / 2)) {
compl = true;
eosw_txq->ncompl++;
eosw_txq->last_compl = 0;
@@ -2153,8 +2153,8 @@ static inline void *write_eo_wr(struct adapter *adap,
return cpl;
}
-static void ethofld_hard_xmit(struct net_device *dev,
- struct sge_eosw_txq *eosw_txq)
+static int ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
@@ -2167,8 +2167,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
bool skip_eotx_wr = false;
struct tx_sw_desc *d;
struct sk_buff *skb;
+ int left, ret = 0;
u8 flits, ndesc;
- int left;
eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
spin_lock(&eohw_txq->lock);
@@ -2198,11 +2198,19 @@ static void ethofld_hard_xmit(struct net_device *dev,
wrlen = flits * 8;
wrlen16 = DIV_ROUND_UP(wrlen, 16);
- /* If there are no CPL credits, then wait for credits
- * to come back and retry again
+ left = txq_avail(&eohw_txq->q) - ndesc;
+
+ /* If there are no descriptors left in the hardware queues or no
+ * CPL credits left in the software queues, then wait for them
+ * to come back and retry. Note that we always request a credit
+ * update via interrupt whenever half of the credits have been
+ * consumed, so the interrupt will eventually restore the
+ * credits and invoke the Tx path again.
*/
- if (unlikely(wrlen16 > eosw_txq->cred))
+ if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
+ ret = -ENOMEM;
goto out_unlock;
+ }
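The check guards two independent resources: hardware descriptor-ring space (left, counted in descriptors) and software CPL credits (wrlen16, counted in 16-byte units). A hedged restatement with hypothetical names:

	/* Nonzero when the work request fits now; otherwise the caller backs
	 * off and the credit-update interrupt restarts transmission later.
	 */
	static int eo_can_xmit(int hw_desc_avail, unsigned int ndesc,
			       unsigned int wrlen16, unsigned int sw_credits)
	{
		if (hw_desc_avail - (int)ndesc < 0)	/* no ring descriptors left */
			return 0;
		if (wrlen16 > sw_credits)		/* no CPL credits left */
			return 0;
		return 1;
	}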
if (unlikely(skip_eotx_wr)) {
start = (u64 *)wr;
@@ -2231,7 +2239,8 @@ write_wr_headers:
sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
hdr_len);
if (data_len) {
- if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
+ if (unlikely(ret)) {
memset(d->addr, 0, sizeof(d->addr));
eohw_txq->mapping_err++;
goto out_unlock;
@@ -2277,12 +2286,13 @@ write_wr_headers:
out_unlock:
spin_unlock(&eohw_txq->lock);
+ return ret;
}
static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
struct sk_buff *skb;
- int pktcount;
+ int pktcount, ret;
switch (eosw_txq->state) {
case CXGB4_EO_STATE_ACTIVE:
@@ -2307,7 +2317,9 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
continue;
}
- ethofld_hard_xmit(dev, eosw_txq);
+ ret = ethofld_hard_xmit(dev, eosw_txq);
+ if (ret)
+ break;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2a3480fc1d91..1c8068c02728 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -10361,9 +10361,10 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
return ret;
}
-int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
- int rateunit, int ratemode, int channel, int class,
- int minrate, int maxrate, int weight, int pktsize)
+int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
+ u8 rateunit, u8 ratemode, u8 channel, u8 class,
+ u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
+ u16 burstsize)
{
struct fw_sched_cmd cmd;
@@ -10385,6 +10386,7 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
cmd.u.params.max = cpu_to_be32(maxrate);
cmd.u.params.weight = cpu_to_be16(weight);
cmd.u.params.pktsize = cpu_to_be16(pktsize);
+ cmd.u.params.burstsize = cpu_to_be16(burstsize);
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 68fe734b9b37..0a326c054707 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1205,7 +1205,7 @@ enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLS_INLINE = 0x00000002,
FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
- FW_CAPS_CONFIG_TX_TLS_HW = 0x00000008,
+ FW_CAPS_CONFIG_TLS_HW = 0x00000008,
};
enum fw_caps_config_fcoe {
@@ -1329,7 +1329,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
- FW_PARAMS_PARAM_DEV_KTLS_TX_HW = 0x31,
+ FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
};
/*
@@ -1412,6 +1412,12 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
};
+enum fw_params_param_dev_ktls_hw {
+ FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE = 0x00,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE = 0x01,
+ FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE = 0x01,
+};
+
enum fw_params_param_dev_phyfw {
FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9cc3541a7e1c..cec865a97464 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2480,7 +2480,7 @@ static int setup_debugfs(struct adapter *adapter)
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
debugfs_create_file(debugfs_files[i].name,
debugfs_files[i].mode,
- adapter->debugfs_root, (void *)adapter,
+ adapter->debugfs_root, adapter,
debugfs_files[i].fops);
return 0;
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 48f3198381bc..8d845f5ee0c5 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -24,7 +24,7 @@ config CS89x0
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
- <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>.
+ <file:Documentation/networking/device_drivers/cirrus/cs89x0.rst>.
To compile this driver as a module, choose M here. The module
will be called cs89x0.
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5bff5c2be88b..8d13ea370db1 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1224,7 +1224,8 @@ map_error:
return -ENOMEM;
}
-static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gmac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
unsigned short m = (1 << port->txq_order) - 1;
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 8ce6888ea722..177f36f4b89d 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -114,7 +114,7 @@ config DE4X5
These include the DE425, DE434, DE435, DE450 and DE500 models. If
you have a network card of this type, say Y. More specific
information is contained in
- <file:Documentation/networking/device_drivers/dec/de4x5.txt>.
+ <file:Documentation/networking/device_drivers/dec/de4x5.rst>.
To compile this driver as a module, choose M here. The module will
be called de4x5.
@@ -138,7 +138,7 @@ config DM9102
This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
Davicom (<http://www.davicom.com.tw/>). If you have such a network
(Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/device_drivers/dec/dmfe.txt>.
+ <file:Documentation/networking/device_drivers/dec/dmfe.rst>.
To compile this driver as a module, choose M here. The module will
be called dmfe.
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f16853c3c851..0ccd9994ad45 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -951,7 +951,7 @@ static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 si
static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
static int test_tp(struct net_device *dev, s32 msec);
static int EISA_signature(char *name, struct device *device);
-static int PCI_signature(char *name, struct de4x5_private *lp);
+static void PCI_signature(char *name, struct de4x5_private *lp);
static void DevicePresent(struct net_device *dev, u_long iobase);
static void enet_addr_rst(u_long aprom_addr);
static int de4x5_bad_srom(struct de4x5_private *lp);
@@ -3902,14 +3902,14 @@ EISA_signature(char *name, struct device *device)
/*
** Look for a particular board name in the PCI configuration space
*/
-static int
+static void
PCI_signature(char *name, struct de4x5_private *lp)
{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+ int i, siglen = ARRAY_SIZE(de4x5_signatures);
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
- return status;
+ return;
} else { /* Search for a DEC name in the SROM */
int tmp = *((char *)&lp->srom + 19) * 3;
strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
@@ -3935,8 +3935,6 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = true;
}
-
- return status;
}
/*
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 643090555cc7..5143722c4419 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1869,7 +1869,7 @@ Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-Read Documentation/networking/device_drivers/dlink/dl2k.txt for details.
+Read Documentation/networking/device_drivers/dlink/dl2k.rst for details.
*/
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 057a508dd6e2..db98274501a0 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -776,8 +776,7 @@ static int dnet_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bp->regs = devm_ioremap_resource(&pdev->dev, res);
+ bp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(bp->regs)) {
err = PTR_ERR(bp->regs);
goto err_out_free_dev;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 32cf54f0e35b..473b337b2e3b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1057,9 +1057,6 @@ static int ftmac100_probe(struct platform_device *pdev)
struct ftmac100 *priv;
int err;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6bfa7575af94..2972244e6eb0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2107,7 +2107,7 @@ workaround:
/* Workaround for DPAA_A050385 requires data start to be aligned */
start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
- if (start - new_skb->data != 0)
+ if (start - new_skb->data)
skb_reserve(new_skb, start - new_skb->data);
skb_put(new_skb, skb->len);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index c6fb8e4021ac..feea797cde02 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH
The driver manages network objects discovered on the Freescale
MC bus.
+if FSL_DPAA2_ETH
+config FSL_DPAA2_ETH_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on DCB
+ help
+ Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
+ devices.
+endif
+
config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock"
depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 69184ca3b7b9..6e7f33c956bf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
new file mode 100644
index 000000000000..83dee575c2fa
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
+ return 0;
+
+ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
+
+ return 0;
+}
+
+static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
+{
+ return !!(pfc_en & (1 << tc));
+}
+
+static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+{
+ struct dpni_congestion_notification_cfg cfg = {0};
+ int i, err;
+
+ cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
+ cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cfg.message_iova = 0ULL;
+ cfg.message_ctx = 0ULL;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (is_prio_enabled(pfc_en, i)) {
+ cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
+ cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
+ } else {
+ /* For priorities not set in the pfc_en mask, we leave
+ * the congestion thresholds at zero, which effectively
+ * disables generation of PFC frames for them
+ */
+ cfg.threshold_entry = 0;
+ cfg.threshold_exit = 0;
+ }
+
+ err = dpni_set_congestion_notification(priv->mc_io, 0,
+ priv->mc_token,
+ DPNI_QUEUE_RX, i, &cfg);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_congestion_notification failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
+ struct ieee_pfc *pfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg link_cfg = {0};
+ bool tx_pause;
+ int err;
+
+ if (pfc->mbc || pfc->delay)
+ return -EOPNOTSUPP;
+
+ /* If same PFC enabled mask, nothing to do */
+ if (priv->pfc.pfc_en == pfc->pfc_en)
+ return 0;
+
+ /* We allow PFC configuration even if it won't have any effect until
+ * general pause frames are enabled
+ */
+ tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
+ if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
+ netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");
+
+ link_cfg.rate = priv->link_state.rate;
+ link_cfg.options = priv->link_state.options;
+ if (pfc->pfc_en)
+ link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
+ else
+ link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg failed\n");
+ return err;
+ }
+
+ /* Configure congestion notifications for the enabled priorities */
+ err = set_pfc_cn(priv, pfc->pfc_en);
+ if (err)
+ return err;
+
+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+ priv->pfc_enabled = !!pfc->pfc_en;
+
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
+
+ return 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return priv->dcbx_mode;
+}
+
+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return (mode != (priv->dcbx_mode)) ? 1 : 0;
+}
+
+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_mode;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
+ .getcap = dpaa2_eth_dcbnl_getcap,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a9afe46b837f..c453a23045c1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "VFQID", "CPU", "Type", "Frames", "Pending frames");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
if (err)
fcnt = 0;
- seq_printf(file, "%5d%16d%16s%16llu%16u\n",
+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid,
fq->target_cpu,
+ fq->tc,
fq_type_to_str(fq),
fq->stats.frames,
fcnt);
@@ -127,16 +128,19 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "CDANs", "Buf count");
+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ "Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16d\n",
+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
+ ch->stats.frames,
ch->stats.cdan,
+ div64_u64(ch->stats.frames, ch->stats.cdan),
ch->buf_count);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d97c320a2dc0..8fb48de5d18c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2020 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -244,13 +244,72 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
ch->xdp.drop_cnt = 0;
}
-static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_eth_xdp_fds *xdp_fds)
+{
+ int total_enqueued = 0, retries = 0, enqueued;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ int num_fds, err, max_retries;
+ struct dpaa2_fd *fds;
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ /* try to enqueue all the FDs until the max number of retries is hit */
+ fds = xdp_fds->fds;
+ num_fds = xdp_fds->num;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued],
+ 0, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ percpu_extras->tx_portal_busy += ++retries;
+ continue;
+ }
+ total_enqueued += enqueued;
+ }
+ xdp_fds->num = 0;
+
+ return total_enqueued;
+}
+
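Note that the retry budget is num_fds * DPAA2_ETH_ENQUEUE_RETRIES for the whole call, not per frame, and every -EBUSY is charged to tx_portal_busy. A stripped-down sketch of the loop (the callback type is hypothetical):

	#include <errno.h>

	/* enq() tries frames [off, off + n); returns -EBUSY or the count taken */
	static int flush_with_retries(int (*enq)(int off, int n),
				      int num, int retries_per_frame)
	{
		int total = 0, retries = 0;
		int max_retries = num * retries_per_frame;

		while (total < num && retries < max_retries) {
			int ret = enq(total, num - total);

			if (ret == -EBUSY) {	/* portal busy: count it and retry */
				retries++;
				continue;
			}
			total += ret;		/* partial enqueues accumulate */
		}
		return total;			/* may be < num under sustained busy */
	}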
+static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_fd *fds;
+ int enqueued, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* Enqueue the array of XDP_TX frames */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ fds = fq->xdp_tx_fds.fds;
+ for (i = 0; i < enqueued; i++) {
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ ch->stats.xdp_tx++;
+ }
+ for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+ xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ }
+ fq->xdp_tx_fds.num = 0;
+}
+
+static void xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
- struct dpaa2_eth_fq *fq;
struct dpaa2_faead *faead;
+ struct dpaa2_fd *dest_fd;
+ struct dpaa2_eth_fq *fq;
u32 ctrl, frc;
- int i, err;
/* Mark the egress frame hardware annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
@@ -267,13 +326,13 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
faead->conf_fqid = 0;
fq = &priv->fq[queue_id];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, fd, 0);
- if (err != -EBUSY)
- break;
- }
+ dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
+ memcpy(dest_fd, fd, sizeof(*dest_fd));
- return err;
+ if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
+ return;
+
+ xdp_tx_flush(priv, ch, fq);
}
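XDP_TX frames are now staged in a per-FQ array and pushed to hardware in bursts: when DEV_MAP_BULK_SIZE descriptors accumulate, or from the NAPI poll once the Rx burst ends (see xdp_tx_flush() above). A hedged sketch of the staging step (names hypothetical; BULK stands in for DEV_MAP_BULK_SIZE, and flush() is expected to reset the count):

	#define BULK 16				/* stands in for DEV_MAP_BULK_SIZE */

	struct frame;				/* opaque frame descriptor */

	struct tx_batch {
		int num;
		struct frame *slots[BULK];
	};

	static void stage_frame(struct tx_batch *b, struct frame *f,
				void (*flush)(struct tx_batch *b))
	{
		b->slots[b->num++] = f;
		if (b->num == BULK)
			flush(b);		/* also flushed at end of NAPI poll */
	}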
static u32 run_xdp(struct dpaa2_eth_priv *priv,
@@ -282,14 +341,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
struct dpaa2_fd *fd, void *vaddr)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
- struct rtnl_link_stats64 *percpu_stats;
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
int err;
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
-
rcu_read_lock();
xdp_prog = READ_ONCE(ch->xdp.prog);
@@ -302,6 +358,9 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(&xdp);
xdp.rxq = &ch->xdp_rxq;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
+ (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
/* xdp.data pointer may have changed */
@@ -312,16 +371,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
case XDP_PASS:
break;
case XDP_TX:
- err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
- if (err) {
- xdp_release_buf(priv, ch, addr);
- percpu_stats->tx_errors++;
- ch->stats.xdp_tx_err++;
- } else {
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
- ch->stats.xdp_tx++;
- }
+ xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
@@ -337,7 +387,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
dma_unmap_page(priv->net_dev->dev.parent, addr,
priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
+
+ /* Allow redirect use of full headroom */
xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
if (unlikely(err))
ch->stats.xdp_drop++;
@@ -493,6 +547,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -847,7 +902,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
* the Tx confirmation callback for this frame
*/
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio);
+ err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
if (err != -EBUSY)
break;
}
@@ -1138,6 +1193,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
int store_cleaned, work_done;
struct list_head rx_list;
int retries = 0;
+ u16 flowid;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1160,6 +1216,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
break;
if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
+ flowid = fq->flowid;
} else {
txconf_cleaned += store_cleaned;
/* We have a single Tx conf FQ on this channel */
@@ -1202,6 +1259,8 @@ out:
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush_map();
+ else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
@@ -1228,31 +1287,67 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc)
{
struct dpni_taildrop td = {0};
+ struct dpaa2_eth_fq *fq;
int i, err;
- if (priv->rx_td_enabled == enable)
- return;
+ /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
+ * flow control is disabled (as it might interfere with either the
+ * buffer pool depletion trigger for pause frames or the group
+ * congestion trigger for PFC frames)
+ */
+ td.enable = !tx_pause;
+ if (priv->rx_fqtd_enabled == td.enable)
+ goto set_cgtd;
- td.enable = enable;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+ td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+ td.units = DPNI_CONGESTION_UNIT_BYTES;
for (i = 0; i < priv->num_fqs; i++) {
- if (priv->fq[i].type != DPAA2_RX_FQ)
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_RX_FQ)
continue;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
- DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
- priv->fq[i].flowid, &td);
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ fq->tc, fq->flowid, &td);
if (err) {
netdev_err(priv->net_dev,
- "dpni_set_taildrop() failed\n");
- break;
+ "dpni_set_taildrop(FQ) failed\n");
+ return;
+ }
+ }
+
+ priv->rx_fqtd_enabled = td.enable;
+
+set_cgtd:
+ /* Congestion group taildrop: threshold is in frames, per group
+ * of FQs belonging to the same traffic class.
+ * Enabled if general Tx pause is disabled or if PFCs are enabled
+ * (the congestion group threshold for PFC generation is lower than the
+ * CG taildrop threshold, so it won't interfere with it; we also
+ * want frames in non-PFC enabled traffic classes to be kept in check)
+ */
+ td.enable = !tx_pause || (tx_pause && pfc);
+ if (priv->rx_cgtd_enabled == td.enable)
+ return;
+
+ td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(CG) failed\n");
+ return;
}
}
- priv->rx_td_enabled = enable;
+ priv->rx_cgtd_enabled = td.enable;
}
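Combining the two td.enable expressions above, the resulting configuration matrix is:

	/* tx_pause | pfc | FQ taildrop (bytes) | CG taildrop (frames)
	 * ---------+-----+---------------------+---------------------
	 *    off   | off |         on          |         on
	 *    off   | on  |         on          |         on
	 *    on    | off |         off         |         off
	 *    on    | on  |         off         |         on
	 */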
static int link_state_update(struct dpaa2_eth_priv *priv)
@@ -1272,9 +1367,8 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
* Rx FQ taildrop configuration as well. We configure taildrop
* only when pause frame generation is disabled.
*/
- tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
- !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
- dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
@@ -1880,20 +1974,16 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return 0;
}
-static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
- struct xdp_frame *xdpf)
+static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
+ struct xdp_frame *xdpf,
+ struct dpaa2_fd *fd)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
- struct rtnl_link_stats64 *percpu_stats;
- struct dpaa2_eth_drv_stats *percpu_extras;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
- struct dpaa2_eth_fq *fq;
- struct dpaa2_fd fd;
void *buffer_start, *aligned_start;
dma_addr_t addr;
- int err, i;
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
@@ -1902,11 +1992,8 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
if (xdpf->headroom < needed_headroom)
return -EINVAL;
- percpu_stats = this_cpu_ptr(priv->percpu_stats);
- percpu_extras = this_cpu_ptr(priv->percpu_extras);
-
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
+ memset(fd, 0, sizeof(*fd));
/* Align FD address, if possible */
buffer_start = xdpf->data - needed_headroom;
@@ -1924,32 +2011,14 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
addr = dma_map_single(dev, buffer_start,
swa->xdp.dma_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr))) {
- percpu_stats->tx_dropped++;
+ if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
- }
-
- dpaa2_fd_set_addr(&fd, addr);
- dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
- dpaa2_fd_set_len(&fd, xdpf->len);
- dpaa2_fd_set_format(&fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
-
- fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, 0);
- if (err != -EBUSY)
- break;
- }
- percpu_extras->tx_portal_busy += i;
- if (unlikely(err < 0)) {
- percpu_stats->tx_errors++;
- /* let the Rx device handle the cleanup */
- return err;
- }
- percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
+ dpaa2_fd_set_len(fd, xdpf->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
return 0;
}
@@ -1957,8 +2026,12 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
- int drops = 0;
- int i, err;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int enqueued, i, err;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -1966,17 +2039,31 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (!netif_running(net_dev))
return -ENETDOWN;
- for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ fq = &priv->fq[smp_processor_id()];
+ xdp_redirect_fds = &fq->xdp_redirect_fds;
+ fds = xdp_redirect_fds->fds;
- err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ /* create a FD for each xdp_frame in the list received */
+ for (i = 0; i < n; i++) {
+ err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
+ if (err)
+ break;
}
+ xdp_redirect_fds->num = i;
+
+ /* enqueue all the frame descriptors */
+ enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
+
+ /* update statistics */
+ percpu_stats->tx_packets += enqueued;
+ for (i = 0; i < enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = enqueued; i < n; i++)
+ xdp_return_frame_rx_napi(frames[i]);
- return n - drops;
+ return enqueued;
}
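The rewritten ndo_xdp_xmit() follows the bulk contract: build one FD per xdp_frame, flush the array once, return the count actually accepted, and hand unsent frames back to the XDP core. A hedged sketch of that shape (types and helpers hypothetical):

	struct fd { unsigned long words[4]; };	/* stand-in frame descriptor */

	static int xmit_bulk(void **frames, int n,
			     int (*to_fd)(void *frame, struct fd *fd),
			     int (*flush)(struct fd *fds, int num),
			     void (*release)(void *frame))
	{
		struct fd fds[16];
		int i, j, sent;

		if (n > 16)
			n = 16;
		/* one FD per frame; stop at the first conversion failure */
		for (i = 0; i < n; i++)
			if (to_fd(frames[i], &fds[i]))
				break;
		sent = flush(fds, i);
		/* everything not accepted by hardware goes back to the XDP core */
		for (j = sent; j < n; j++)
			release(frames[j]);
		return sent;			/* contract: number transmitted */
	}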
static int update_xps(struct dpaa2_eth_priv *priv)
@@ -2018,7 +2105,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
int i;
if (type != TC_SETUP_QDISC_MQPRIO)
- return -EINVAL;
+ return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_queues = dpaa2_eth_queue_count(priv);
@@ -2030,7 +2117,7 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev,
if (num_tc > dpaa2_eth_tc_count(priv)) {
netdev_err(net_dev, "Max %d traffic classes supported\n",
dpaa2_eth_tc_count(priv));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (!num_tc) {
@@ -2355,7 +2442,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
- int i;
+ int i, j;
/* We have one TxConf FQ per Tx flow.
* The number of Tx and Rx queues is the same.
@@ -2367,10 +2454,13 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
- for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
- priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
- priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
- priv->fq[priv->num_fqs++].flowid = (u16)i;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs].tc = (u8)j;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
}
/* For each FQ, decide on which core to process incoming frames */
@@ -2528,19 +2618,38 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames __always_unused,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, prio,
- fq->tx_qdbin, fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+ if (!err && frames_enqueued)
+ *frames_enqueued = 1;
+ return err;
}
-static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio)
+static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio, u32 num_frames,
+ int *frames_enqueued)
{
- return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
- fq->tx_fqid[prio], fd);
+ int err;
+
+ err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
+ fq->tx_fqid[prio],
+ fd, num_frames);
+
+ if (err == 0)
+ return -EBUSY;
+
+ if (frames_enqueued)
+ *frames_enqueued = err;
+ return 0;
}
static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
@@ -2549,7 +2658,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
priv->enqueue = dpaa2_eth_enqueue_qd;
else
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
static int set_pause(struct dpaa2_eth_priv *priv)
@@ -2610,7 +2719,7 @@ static void update_tx_fqids(struct dpaa2_eth_priv *priv)
}
}
- priv->enqueue = dpaa2_eth_enqueue_fq;
+ priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
return;
@@ -2620,6 +2729,118 @@ out_err:
priv->enqueue = dpaa2_eth_enqueue_qd;
}
+/* Configure ingress classification based on VLAN PCP */
+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpkg_profile_cfg kg_cfg = {0};
+ struct dpni_qos_tbl_cfg qos_cfg = {0};
+ struct dpni_rule_cfg key_params;
+ void *dma_mem, *key, *mask;
+ u8 key_size = 2; /* VLAN TCI field */
+ int i, pcp, err;
+
+ /* VLAN-based classification only makes sense if we have multiple
+ * traffic classes.
+ * Also, we need to extract just the 3-bit PCP field from the VLAN
+ * header and we can only do that by using a mask
+ */
+ if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
+ dev_dbg(dev, "VLAN-based QoS classification not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ kg_cfg.num_extracts = 1;
+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
+ err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg failed\n");
+ goto out_free_tbl;
+ }
+
+ /* set QoS table */
+ qos_cfg.default_tc = 0;
+ qos_cfg.discard_on_miss = 0;
+ qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
+ dev_err(dev, "QoS table DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_tbl;
+ }
+
+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_qos_table failed\n");
+ goto out_unmap_tbl;
+ }
+
+ /* Add QoS table entries */
+ key = kzalloc(key_size * 2, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out_unmap_tbl;
+ }
+ mask = key + key_size;
+ *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
+
+ key_params.key_iova = dma_map_single(dev, key, key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_params.key_iova)) {
+ dev_err(dev, "Qos table entry DMA mapping failed\n");
+ err = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_params.mask_iova = key_params.key_iova + key_size;
+ key_params.key_size = key_size;
+
+ /* We add rules for PCP-based distribution starting with highest
+ * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
+ * classes to accommodate all priority levels, the lowest ones end up
+ * on TC 0, which was configured as the default
+ */
+ for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
+ *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
+ dma_sync_single_for_device(dev, key_params.key_iova,
+ key_size * 2, DMA_TO_DEVICE);
+
+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
+ &key_params, i, i);
+ if (err) {
+ dev_err(dev, "dpni_add_qos_entry failed\n");
+ dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
+ goto out_unmap_key;
+ }
+ }
+
+ priv->vlan_cls_enabled = true;
+
+ /* Table and key memory is not persistent, clean everything up after
+ * configuration is finished
+ */
+out_unmap_key:
+ dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
+out_free_key:
+ kfree(key);
+out_unmap_tbl:
+ dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+out_free_tbl:
+ kfree(dma_mem);
+
+ return err;
+}
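For reference, the QoS keys built above match only the 3-bit PCP field of the 16-bit VLAN TCI; VLAN_PRIO_MASK (0xe000) in the mask word hides the DEI and VID bits. A small illustrative helper:

	#include <stdint.h>

	#define VLAN_PRIO_MASK	0xe000	/* top 3 TCI bits = PCP */
	#define VLAN_PRIO_SHIFT	13

	static uint16_t pcp_key(uint8_t pcp)
	{
		/* pcp = 7 -> 0xe000, pcp = 1 -> 0x2000; DEI and VID never
		 * match because the rule's mask word is VLAN_PRIO_MASK
		 */
		return (uint16_t)((pcp << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
	}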
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2682,10 +2903,16 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
+ err = set_vlan_qos(priv);
+ if (err && err != -EOPNOTSUPP)
+ goto close;
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
- if (!priv->cls_rules)
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
goto close;
+ }
return 0;
@@ -2716,7 +2943,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
if (err) {
dev_err(dev, "dpni_get_queue(RX) failed\n");
return err;
@@ -2729,7 +2956,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
@@ -2738,6 +2965,10 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
}
/* xdp_rxq setup */
+ /* only once for each channel */
+ if (fq->tc > 0)
+ return 0;
+
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
if (err) {
@@ -2875,7 +3106,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2883,9 +3114,14 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
+ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2895,7 +3131,7 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2903,9 +3139,15 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -2915,7 +3157,7 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
- int err;
+ int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
@@ -2923,9 +3165,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
- err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
- if (err)
- dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
+ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+ break;
+ }
+ }
return err;
}
@@ -3611,6 +3859,15 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_alloc_rings;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
+ } else {
+ dev_dbg(dev, "PFC not supported\n");
+ }
+#endif
+
err = setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 13242bf5b427..2d7ada0f0dbd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*/
#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H
+#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
@@ -36,27 +37,46 @@
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
+/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
+ * enough number of jumbo frames in the Rx queues (length of the current
+ * frame is not taken into account when making the taildrop decision)
*/
-#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
/* Maximum number of Tx confirmation frames to be processed
* in a single NAPI call
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
-/* Buffer quota per queue. Must be large enough such that for minimum sized
- * frames taildrop kicks in before the bpool gets depleted, so we compute
- * how many 64B frames fit inside the taildrop threshold and add a margin
- * to accommodate the buffer refill delay.
+/* Buffer quota per channel. We want to keep the number of ingress frames
+ * in flight in check: for small frames, congestion group taildrop may kick in
+ * first; for large frames, the Rx FQ taildrop threshold will ensure only a
+ * reasonable number of frames are pending at any given time.
+ * Ingress frame drop due to buffer pool depletion should be a corner case only
*/
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_NUM_BUFS 1280
#define DPAA2_ETH_REFILL_THRESH \
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose a value such that we don't risk depleting the buffer pool before
+ * the taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+ (1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
+
+/* Congestion group notification threshold: when this many frames accumulate
+ * on the Rx queues belonging to the same TC, the MAC is instructed to send
+ * PFC frames for that TC.
+ * When the number of pending frames drops below the exit threshold,
+ * transmission of PFC frames is stopped.
+ */
+#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
+ (DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
+#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
+ (DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
+
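Worked example of the thresholds above, assuming a DPNI with 8 Rx queues and 2 traffic classes:

	/* CG taildrop = 1024 * 8 / 2 = 4096 frames per TC group
	 * PFC entry   = 4096 / 2     = 2048 frames (start sending PFC)
	 * PFC exit    = 2048 * 3 / 4 = 1536 frames (stop sending PFC)
	 * The entry/exit gap gives hysteresis, so PFC doesn't flap on and
	 * off with every enqueue/dequeue around a single threshold.
	 */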
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
*/
@@ -288,11 +308,15 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_tx;
__u64 xdp_tx_err;
__u64 xdp_redirect;
+ /* Must be last, does not show up in ethtool stats */
+ __u64 frames;
};
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
-#define DPAA2_ETH_MAX_RX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
+#define DPAA2_ETH_MAX_RX_QUEUES \
+ (DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES)
@@ -308,6 +332,11 @@ enum dpaa2_eth_fq_type {
struct dpaa2_eth_priv;
+struct dpaa2_eth_xdp_fds {
+ struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
+ ssize_t num;
+};
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -325,6 +354,9 @@ struct dpaa2_eth_fq {
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
+
+ struct dpaa2_eth_xdp_fds xdp_redirect_fds;
+ struct dpaa2_eth_xdp_fds xdp_tx_fds;
};
struct dpaa2_eth_ch_xdp {
@@ -371,7 +403,9 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
int (*enqueue)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
- struct dpaa2_fd *fd, u8 prio);
+ struct dpaa2_fd *fd, u8 prio,
+ u32 num_frames,
+ int *frames_enqueued);
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -402,7 +436,8 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
- u8 rx_td_enabled;
+ u8 rx_fqtd_enabled;
+ u8 rx_cgtd_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
@@ -413,6 +448,12 @@ struct dpaa2_eth_priv {
u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
+ u8 vlan_cls_enabled;
+ u8 pfc_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
+ u8 dcbx_mode;
+ struct ieee_pfc pfc;
+#endif
struct bpf_prog *xdp_prog;
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
@@ -495,6 +536,17 @@ enum dpaa2_eth_rx_dist {
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
DPNI_PAUSE_VER_MINOR) >= 0)
+static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
+static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
+{
+ return !!(link_options & DPNI_LINK_OPT_PAUSE);
+}
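Since tx pause is the XOR of the two link option bits, the helpers above decode the standard 802.3 pause combinations as follows (a truth table derived directly from the code):

/* PAUSE  ASYM_PAUSE  ->  rx_pause  tx_pause
 *   0         0              0         0
 *   0         1              0         1
 *   1         0              1         1
 *   1         1              1         0
 */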
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -534,4 +586,9 @@ int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause, bool pfc);
+
+extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+
#endif /* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index b7141fdc279e..e88269fe3de7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -130,9 +130,8 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
return;
}
- pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
- pause->tx_pause = pause->rx_pause ^
- !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+ pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
+ pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
}
@@ -277,7 +276,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
@@ -547,7 +546,7 @@ static int do_cls_rule(struct net_device *net_dev,
dma_addr_t key_iova;
u64 fields = 0;
void *key_buf;
- int err;
+ int i, err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
@@ -607,11 +606,18 @@ static int do_cls_rule(struct net_device *net_dev,
fs_act.options |= DPNI_FS_OPT_DISCARD;
else
fs_act.flow_id = fs->ring_cookie;
- err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- fs->location, &rule_cfg, &fs_act);
- } else {
- err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
- &rule_cfg);
+ }
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
+ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
+ priv->mc_token, i,
+ &rule_cfg);
+ if (err)
+ break;
}
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index d9b6918807af..fd069f67be9b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -59,6 +59,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
+#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
+#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
@@ -567,4 +571,59 @@ struct dpni_cmd_remove_fs_entry {
__le64 mask_iova;
};
+#define DPNI_DISCARD_ON_MISS_SHIFT 0
+#define DPNI_DISCARD_ON_MISS_SIZE 1
+
+struct dpni_cmd_set_qos_table {
+ __le32 pad;
+ u8 default_tc;
+ /* only the LSB */
+ u8 discard_on_miss;
+ __le16 pad1[21];
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_qos_entry {
+ __le16 pad;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+struct dpni_cmd_remove_qos_entry {
+ u8 pad[3];
+ u8 key_size;
+ __le32 pad1;
+ __le64 key_iova;
+ __le64 mask_iova;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_CONG_UNITS_SHIFT 4
+#define DPNI_CONG_UNITS_SIZE 2
+
+struct dpni_cmd_set_congestion_notification {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 dest_priority;
+ /* from LSB: dest_type: 4 units:2 */
+ u8 type_units;
+ /* cmd word 2 */
+ __le64 message_iova;
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index dd54e6953aeb..6b479ba66465 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1355,6 +1355,52 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Congestion notification configuration
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg)
+{
+ struct dpni_cmd_set_congestion_notification *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header =
+ mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc_id;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
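A minimal usage sketch of this command, assuming a caller that wants frame-based congestion notification on Rx traffic class 0, using the thresholds from dpaa2-eth.h and no CSCN destination (the variable names and concrete choices here are illustrative, not lifted from the patch):

struct dpni_congestion_notification_cfg notif_cfg = {
	.units = DPNI_CONGESTION_UNIT_FRAMES,
	.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv),
	.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv),
	.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL,
	.dest_cfg = { .dest_type = DPNI_DEST_NONE },
};
int err;

err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
				       DPNI_QUEUE_RX, 0, &notif_cfg);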
+
+/**
* dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -1786,3 +1832,134 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpni_set_qos_table() - Set QoS mapping table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS table configuration
+ *
+ * This function and all QoS-related functions require that
+ * 'max_tcs > 1' was set at DPNI creation.
+ *
+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
+ * prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg)
+{
+ struct dpni_cmd_set_qos_table *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
+ cmd_params->default_tc = cfg->default_tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+ dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
+ cfg->discard_on_miss);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to add
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the QoS table at which to insert the entry.
+ * Only relevant if MASKING is enabled for QoS classification on
+ * this DPNI; it is ignored for exact match.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index)
+{
+ struct dpni_cmd_add_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_qos_entry() - Remove QoS mapping entry
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: QoS rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_qos_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_qos_table() - Clear all QoS mapping entries
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Following this function call, all frames are directed to
+ * the default traffic class (0)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index ee0711d06b3a..e874d8084142 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -514,6 +514,11 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
+/**
* struct - Structure representing DPNI link configuration
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
@@ -716,6 +721,26 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_dist_cfg *cfg);
/**
+ * struct dpni_qos_tbl_cfg - Structure representing QoS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes of DMA-able memory filled
+ * with the key extractions to be used as the QoS criteria, prepared by
+ * calling dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ * '0' to use the 'default_tc' in such cases
+ * @default_tc: Used in case of no match when 'discard_on_miss' = 0
+ */
+struct dpni_qos_tbl_cfg {
+ u64 key_cfg_iova;
+ int discard_on_miss;
+ u8 default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_qos_tbl_cfg *cfg);
+
+/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
* does not generate FQDAN notifications; user is expected to
@@ -858,6 +883,62 @@ enum dpni_congestion_point {
};
/**
+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid
+ * values are 0-1 or 0-7, depending on the number of priorities
+ * in that channel; not relevant for 'DPNI_DEST_NONE' option
+ */
+struct dpni_dest_cfg {
+ enum dpni_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/* DPNI congestion options */
+
+/**
+ * Reaching this congestion condition will trigger flow control or priority
+ * flow control. This will take effect only if flow control is enabled with
+ * dpni_set_link_cfg().
+ */
+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
+/**
+ * struct dpni_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: Units type
+ * @threshold_entry: Above this threshold we enter a congestion state.
+ * Set it to '0' to disable it
+ * @threshold_exit: Below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
+ * is contained in 'options'
+ * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ */
+struct dpni_congestion_notification_cfg {
+ enum dpni_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpni_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpni_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc_id,
+ const struct dpni_congestion_notification_cfg *cfg);
+
+/**
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
* @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
@@ -961,6 +1042,22 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u8 tc_id,
const struct dpni_rule_cfg *cfg);
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg,
+ u8 tc_id,
+ u16 index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index ccf2611f4a20..298c55786fd9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -756,6 +756,9 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_SIPCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
}
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
@@ -1518,6 +1521,8 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
return enetc_setup_tc_cbs(ndev, type_data);
case TC_SETUP_QDISC_ETF:
return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1567,15 +1572,42 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
+static int enetc_set_psfp(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
- return 0;
+ if (changed & NETIF_F_HW_TC)
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+
+ return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 56c43f35b633..b705464f6882 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -151,6 +151,7 @@ enum enetc_errata {
};
#define ENETC_SI_F_QBV BIT(0)
+#define ENETC_SI_F_PSFP BIT(1)
/* PCI IEP device data */
struct enetc_si {
@@ -203,12 +204,20 @@ struct enetc_cls_rule {
};
#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+struct psfp_cap {
+ u32 max_streamid;
+ u32 max_psfp_filter;
+ u32 max_psfp_gate;
+ u32 max_psfp_gatelist;
+ u32 max_psfp_meter;
+};
/* TODO: more hardware offloads */
enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(0),
ENETC_F_TX_TSTAMP = BIT(1),
ENETC_F_QBV = BIT(2),
+ ENETC_F_QCI = BIT(3),
};
struct enetc_ndev_priv {
@@ -231,6 +240,8 @@ struct enetc_ndev_priv {
struct enetc_cls_rule *cls_rules;
+ struct psfp_cap psfp_cap;
+
struct device_node *phy_node;
phy_interface_t if_mode;
};
@@ -289,9 +300,84 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct net_device *ndev);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+int enetc_psfp_init(struct enetc_ndev_priv *priv);
+int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+
+static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+{
+ u32 reg;
+
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
+ /* Port stream filter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
+ /* Port stream gate capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
+ priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
+ /* Port flow meter capability */
+ reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
+}
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ enetc_get_max_cap(priv);
+
+ err = enetc_psfp_init(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
+ ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
+ ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
+
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int err;
+
+ err = enetc_psfp_clean(priv);
+ if (err)
+ return err;
+
+ enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
+ ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
+ ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
+
+ memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));
+
+ return 0;
+}
+
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(ndev) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
+#define enetc_setup_tc_block_cb NULL
+
+#define enetc_get_max_cap(p) \
+ memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))
+
+static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
+
+static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 2a6523136947..6314051bc6c1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -19,6 +19,7 @@
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
@@ -228,6 +229,15 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
#define ENETC_PM0_IFM_XGMII BIT(12)
+#define ENETC_PSIDCAPR 0x1b08
+#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSFCAPR 0x1b18
+#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
+#define ENETC_PSGCAPR 0x1b28
+#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
+#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
+#define ENETC_PFMCAPR 0x1b38
+#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
/* MAC counters */
#define ENETC_PM0_REOCT 0x8100
@@ -557,6 +567,9 @@ enum bdcr_cmd_class {
BDCR_CMD_RFS,
BDCR_CMD_PORT_GCL,
BDCR_CMD_RECV_CLASSIFIER,
+ BDCR_CMD_STREAM_IDENTIFY,
+ BDCR_CMD_STREAM_FILTER,
+ BDCR_CMD_STREAM_GCL,
__BDCR_CMD_MAX_LEN,
BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
};
@@ -588,13 +601,152 @@ struct tgs_gcl_data {
struct gce entry[];
};
+/* class 7, command 0, Stream Identity Entry Configuration */
+struct streamid_conf {
+ __le32 stream_handle; /* init gate value */
+ __le32 iports;
+ u8 id_type;
+ u8 oui[3];
+ u8 res[3];
+ u8 en;
+};
+
+#define ENETC_CBDR_SID_VID_MASK 0xfff
+#define ENETC_CBDR_SID_VIDM BIT(12)
+#define ENETC_CBDR_SID_TG_MASK 0xc000
+/* The streamid_conf address points to this data space */
+struct streamid_data {
+ union {
+ u8 dmac[6];
+ u8 smac[6];
+ };
+ u16 vid_vidm_tg;
+};
+
+#define ENETC_CBDR_SFI_PRI_MASK 0x7
+#define ENETC_CBDR_SFI_PRIM BIT(3)
+#define ENETC_CBDR_SFI_BLOV BIT(4)
+#define ENETC_CBDR_SFI_BLEN BIT(5)
+#define ENETC_CBDR_SFI_MSDUEN BIT(6)
+#define ENETC_CBDR_SFI_FMITEN BIT(7)
+#define ENETC_CBDR_SFI_ENABLE BIT(7)
+/* class 8, command 0, Stream Filter Instance, Short Format */
+struct sfi_conf {
+ __le32 stream_handle;
+ u8 multi;
+ u8 res[2];
+ u8 sthm;
+ /* Max Service Data Unit or Flow Meter Instance Table index.
+ * Depending on the value of FLT this represents either Max
+ * Service Data Unit (max frame size) allowed by the filter
+ * entry or is an index into the Flow Meter Instance table
+ * index identifying the policer which will be used to police
+ * it.
+ */
+ __le16 fm_inst_table_index;
+ __le16 msdu;
+ __le16 sg_inst_table_index;
+ u8 res1[2];
+ __le32 input_ports;
+ u8 res2[3];
+ u8 en;
+};
+
+/* class 8, command 2, Stream Filter Instance status query, short format.
+ * The command itself needs no structure definition.
+ * Stream Filter Instance Query Statistics Response data
+ */
+struct sfi_counter_data {
+ u32 matchl;
+ u32 matchh;
+ u32 msdu_dropl;
+ u32 msdu_droph;
+ u32 stream_gate_dropl;
+ u32 stream_gate_droph;
+ u32 flow_meter_dropl;
+ u32 flow_meter_droph;
+};
+
+#define ENETC_CBDR_SGI_OIPV_MASK 0x7
+#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_CGTST BIT(6)
+#define ENETC_CBDR_SGI_OGTST BIT(7)
+#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
+#define ENETC_CBDR_SGI_CFG_PND BIT(2)
+#define ENETC_CBDR_SGI_OEX BIT(4)
+#define ENETC_CBDR_SGI_OEXEN BIT(5)
+#define ENETC_CBDR_SGI_IRX BIT(6)
+#define ENETC_CBDR_SGI_IRXEN BIT(7)
+#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
+#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
+#define ENETC_CBDR_SGI_EN BIT(7)
+/* class 9, command 0, Stream Gate Instance Table, Short Format
+ * class 9, command 2, Stream Gate Instance Table entry query write back
+ * Short Format
+ */
+struct sgi_table {
+ u8 res[8];
+ u8 oipv;
+ u8 res0[2];
+ u8 ocgtst;
+ u8 res1[7];
+ u8 gset;
+ u8 oacl_len;
+ u8 res2[2];
+ u8 en;
+};
+
+#define ENETC_CBDR_SGI_AIPV_MASK 0x7
+#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
+#define ENETC_CBDR_SGI_AGTST BIT(7)
+
+/* class 9, command 1, Stream Gate Control List, Long Format */
+struct sgcl_conf {
+ u8 aipv;
+ u8 res[2];
+ u8 agtst;
+ u8 res1[4];
+ union {
+ struct {
+ u8 res2[4];
+ u8 acl_len;
+ u8 res3[3];
+ };
+ u8 cct[8]; /* Config change time */
+ };
+};
+
+#define ENETC_CBDR_SGL_IOMEN BIT(0)
+#define ENETC_CBDR_SGL_IPVEN BIT(3)
+#define ENETC_CBDR_SGL_GTST BIT(4)
+#define ENETC_CBDR_SGL_IPV_MASK 0xe
+/* Stream Gate Control List Entry */
+struct sgce {
+ u32 interval;
+ u8 msdu[3];
+ u8 multi;
+};
+
+/* Stream Gate Control List, class 9, command 1, data buffer */
+struct sgcl_data {
+ u32 btl;
+ u32 bth;
+ u32 ct;
+ u32 cte;
+ struct sgce sgcl[];
+};
+
struct enetc_cbd {
union{
+ struct sfi_conf sfi_conf;
+ struct sgi_table sgi_table;
struct {
__le32 addr[2];
union {
__le32 opt[4];
struct tgs_gcl_conf gcl_conf;
+ struct streamid_conf sid_set;
+ struct sgcl_conf sgcl_conf;
};
}; /* Long format */
__le32 data[6];
@@ -621,3 +773,10 @@ struct enetc_cbd {
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
#define ENETC_TSDE BIT(31)
+
+/* PSFP setting */
+#define ENETC_PPSFPMR 0x11b00
+#define ENETC_PPSFPMR_PSFPEN BIT(0)
+#define ENETC_PPSFPMR_VS BIT(1)
+#define ENETC_PPSFPMR_PVC BIT(2)
+#define ENETC_PPSFPMR_PVZC BIT(3)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 85e2b741df41..824d211ec00f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -50,21 +50,6 @@ static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val);
}
-static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx)
-{
- return pf->vlan_promisc_simap & BIT(si_idx);
-}
-
-static bool enetc_vlan_filter_is_on(struct enetc_pf *pf)
-{
- int i;
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID)
- return true;
-
- return false;
-}
-
static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx)
{
pf->vlan_promisc_simap |= BIT(si_idx);
@@ -204,6 +189,7 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_pf *pf = enetc_si_priv(priv->si);
+ char vlan_promisc_simap = pf->vlan_promisc_simap;
struct enetc_hw *hw = &priv->si->hw;
bool uprom = false, mprom = false;
struct enetc_mac_filter *filter;
@@ -216,16 +202,16 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
uprom = true;
mprom = true;
- /* enable VLAN promisc mode for SI0 */
- if (!enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_enable_si_vlan_promisc(pf, 0);
-
+ /* Enable VLAN promiscuous mode for SI0 (PF) */
+ vlan_promisc_simap |= BIT(0);
} else if (ndev->flags & IFF_ALLMULTI) {
/* enable multi cast promisc mode for SI0 (PF) */
psipmr = ENETC_PSIPMR_SET_MP(0);
mprom = true;
}
+ enetc_set_vlan_promisc(&pf->si->hw, vlan_promisc_simap);
+
/* first 2 filter entries belong to PF */
if (!uprom) {
/* Update unicast filters */
@@ -306,9 +292,6 @@ static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
struct enetc_pf *pf = enetc_si_priv(priv->si);
int idx;
- if (enetc_si_vlan_promisc_is_on(pf, 0))
- enetc_disable_si_vlan_promisc(pf, 0);
-
__set_bit(vid, pf->active_vlans);
idx = enetc_vid_hash_idx(vid);
@@ -326,9 +309,6 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
__clear_bit(vid, pf->active_vlans);
enetc_sync_vlan_ht_filter(pf, true);
- if (!enetc_vlan_filter_is_on(pf))
- enetc_enable_si_vlan_promisc(pf, 0);
-
return 0;
}
@@ -677,6 +657,15 @@ static int enetc_pf_set_features(struct net_device *ndev,
enetc_enable_txvlan(&priv->si->hw, 0,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (!!(features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ enetc_disable_si_vlan_promisc(pf, 0);
+ else
+ enetc_enable_si_vlan_promisc(pf, 0);
+ }
+
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
@@ -719,12 +708,11 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -739,6 +727,12 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->hw_features & ENETC_SI_F_QBV)
priv->active_offloads |= ENETC_F_QBV;
+ if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0c6bf3a55a9a..fd3df19eaa32 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -5,6 +5,9 @@
#include <net/pkt_sched.h>
#include <linux/math64.h>
+#include <linux/refcount.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
@@ -331,3 +334,1103 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return 0;
}
+
+enum streamid_type {
+ STREAMID_TYPE_RESERVED = 0,
+ STREAMID_TYPE_NULL,
+ STREAMID_TYPE_SMAC,
+};
+
+enum streamid_vlan_tagged {
+ STREAMID_VLAN_RESERVED = 0,
+ STREAMID_VLAN_TAGGED,
+ STREAMID_VLAN_UNTAGGED,
+ STREAMID_VLAN_ALL,
+};
+
+#define ENETC_PSFP_WILDCARD -1
+#define HANDLE_OFFSET 100
+
+enum forward_type {
+ FILTER_ACTION_TYPE_PSFP = BIT(0),
+ FILTER_ACTION_TYPE_ACL = BIT(1),
+ FILTER_ACTION_TYPE_BOTH = GENMASK(1, 0),
+};
+
+/* Limits the allowed output type for a given set of input actions */
+struct actions_fwd {
+ u64 actions;
+ u64 keys; /* includes the required keys */
+ enum forward_type output;
+};
+
+struct psfp_streamfilter_counters {
+ u64 matching_frames_count;
+ u64 passing_frames_count;
+ u64 not_passing_frames_count;
+ u64 passing_sdu_count;
+ u64 not_passing_sdu_count;
+ u64 red_frames_count;
+};
+
+struct enetc_streamid {
+ u32 index;
+ union {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ };
+ u8 filtertype;
+ u16 vid;
+ u8 tagged;
+ s32 handle;
+};
+
+struct enetc_psfp_filter {
+ u32 index;
+ s32 handle;
+ s8 prio;
+ u32 gate_id;
+ s32 meter_id;
+ refcount_t refcount;
+ struct hlist_node node;
+};
+
+struct enetc_psfp_gate {
+ u32 index;
+ s8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletimext;
+ u32 num_entries;
+ refcount_t refcount;
+ struct hlist_node node;
+ struct action_gate_entry entries[];
+};
+
+struct enetc_stream_filter {
+ struct enetc_streamid sid;
+ u32 sfi_index;
+ u32 sgi_index;
+ struct flow_stats stats;
+ struct hlist_node node;
+};
+
+struct enetc_psfp {
+ unsigned long dev_bitmap;
+ unsigned long *psfp_sfi_bitmap;
+ struct hlist_head stream_list;
+ struct hlist_head psfp_filter_list;
+ struct hlist_head psfp_gate_list;
+ spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
+};
+
+static struct actions_fwd enetc_act_fwd[] = {
+ {
+ BIT(FLOW_ACTION_GATE),
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ FILTER_ACTION_TYPE_PSFP
+ },
+ /* example for ACL actions */
+ {
+ BIT(FLOW_ACTION_DROP),
+ 0,
+ FILTER_ACTION_TYPE_ACL
+ }
+};
+
+static struct enetc_psfp epsfp = {
+ .psfp_sfi_bitmap = NULL,
+};
+
+static LIST_HEAD(enetc_block_cb_list);
+
+static inline int enetc_get_port(struct enetc_ndev_priv *priv)
+{
+ return priv->si->pdev->devfn & 0x7;
+}
+
+/* Stream Identity Entry Set Descriptor */
+static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct streamid_data *si_data;
+ struct streamid_conf *si_conf;
+ u16 data_size;
+ dma_addr_t dma;
+ int err;
+
+ if (sid->index >= priv->psfp_cap.max_streamid)
+ return -EINVAL;
+
+ if (sid->filtertype != STREAMID_TYPE_NULL &&
+ sid->filtertype != STREAMID_TYPE_SMAC)
+ return -EOPNOTSUPP;
+
+ /* Disable operation before enable */
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct streamid_data);
+ si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
+
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev, si_data,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(si_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+ memset(si_data->dmac, 0xff, ETH_ALEN);
+ si_data->vid_vidm_tg =
+ cpu_to_le16(ENETC_CBDR_SID_VID_MASK
+ + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+
+ si_conf = &cbd.sid_set;
+ /* Only one port is supported per entry; select this port */
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = 1;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err) {
+ kfree(si_data);
+ return -EINVAL;
+ }
+
+ if (!enable) {
+ kfree(si_data);
+ return 0;
+ }
+
+ /* Enable the entry again, in case the space was flushed by hardware */
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16((u16)sid->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
+ cbd.status_flags = 0;
+
+ si_conf->en = 0x80;
+ si_conf->stream_handle = cpu_to_le32(sid->handle);
+ si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->id_type = sid->filtertype;
+ si_conf->oui[2] = 0x0;
+ si_conf->oui[1] = 0x80;
+ si_conf->oui[0] = 0xC2;
+
+ memset(si_data, 0, data_size);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ /* VIDM defaults to 1.
+ * VID Match: if set (b1) the VID must match, otherwise
+ * any VID is considered a match. The VIDM setting is only used
+ * when TG is set to b01.
+ */
+ if (si_conf->id_type == STREAMID_TYPE_NULL) {
+ ether_addr_copy(si_data->dmac, sid->dst_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
+ ether_addr_copy(si_data->smac, sid->src_mac);
+ si_data->vid_vidm_tg =
+ cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM));
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ kfree(si_data);
+
+ return err;
+}
+
+/* Stream Filter Instance Set Descriptor */
+static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_filter *sfi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = {.cmd = 0};
+ struct sfi_conf *sfi_config;
+
+ cbd.index = cpu_to_le16(sfi->index);
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0x80;
+ cbd.length = cpu_to_le16(1);
+
+ sfi_config = &cbd.sfi_conf;
+ if (!enable)
+ goto exit;
+
+ sfi_config->en = 0x80;
+
+ if (sfi->handle >= 0) {
+ sfi_config->stream_handle =
+ cpu_to_le32(sfi->handle);
+ sfi_config->sthm |= 0x80;
+ }
+
+ sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
+ sfi_config->input_ports = 1 << enetc_get_port(priv);
+
+ /* The priority value that is matched against the frame's
+ * priority value to determine a match for this entry.
+ */
+ if (sfi->prio >= 0)
+ sfi_config->multi |= (sfi->prio & 0x7) | 0x8;
+
+ /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
+ * field as being either an MSDU value or an index into the Flow
+ * Meter Instance table.
+ * TODO: no MSDU limit is enforced yet
+ */
+
+ if (sfi->meter_id >= 0) {
+ sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
+ sfi_config->multi |= 0x80;
+ }
+
+exit:
+ return enetc_send_cmd(priv->si, &cbd);
+}
+
+static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
+ u32 index,
+ struct psfp_streamfilter_counters *cnt)
+{
+ struct enetc_cbd cbd = { .cmd = 2 };
+ struct sfi_counter_data *data_buf;
+ dma_addr_t dma;
+ u16 data_size;
+ int err;
+
+ cbd.index = cpu_to_le16((u16)index);
+ cbd.cmd = 2;
+ cbd.cls = BDCR_CMD_STREAM_FILTER;
+ cbd.status_flags = 0;
+
+ data_size = sizeof(struct sfi_counter_data);
+ data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ dma = dma_map_single(&priv->si->pdev->dev, data_buf,
+ data_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ cbd.length = cpu_to_le16(data_size);
+
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ goto exit;
+
+ cnt->matching_frames_count =
+ ((u64)le32_to_cpu(data_buf->matchh) << 32)
+ + le32_to_cpu(data_buf->matchl);
+
+ cnt->not_passing_sdu_count =
+ ((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
+ + le32_to_cpu(data_buf->msdu_dropl);
+
+ cnt->passing_sdu_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count;
+
+ cnt->not_passing_frames_count =
+ ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
+ + le32_to_cpu(data_buf->stream_gate_dropl);
+
+ cnt->passing_frames_count = cnt->matching_frames_count
+ - cnt->not_passing_sdu_count
+ - cnt->not_passing_frames_count;
+
+ cnt->red_frames_count =
+ ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
+ + le32_to_cpu(data_buf->flow_meter_dropl);
+
+exit:
+ kfree(data_buf);
+ return err;
+}
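Each 64-bit counter above is stitched together from two little-endian 32-bit halves; a quick numeric illustration with hypothetical register contents:

/* matchh = 0x00000001, matchl = 0x00000010:
 *   matching_frames_count = (0x1ULL << 32) + 0x10
 *                         = 0x100000010 = 4294967312 frames
 */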
+
+static u64 get_ptp_now(struct enetc_hw *hw)
+{
+ u64 now_lo, now_hi, now;
+
+ now_lo = enetc_rd(hw, ENETC_SICTR0);
+ now_hi = enetc_rd(hw, ENETC_SICTR1);
+ now = now_lo | now_hi << 32;
+
+ return now;
+}
+
+static int get_start_ns(u64 now, u64 cycle, u64 *start)
+{
+ u64 n;
+
+ if (!cycle)
+ return -EFAULT;
+
+ n = div64_u64(now, cycle);
+
+ *start = (n + 1) * cycle;
+
+ return 0;
+}
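get_start_ns() aligns the gate base time to the next full cycle boundary after 'now', so the hardware is never handed a start time in the past; a worked example with made-up values:

/* now = 1000500 ns, cycle = 200000 ns:
 *   n     = div64_u64(1000500, 200000) = 5
 *   start = (5 + 1) * 200000 = 1200000 ns,
 * the first cycle boundary strictly after 'now'.
 */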
+
+/* Stream Gate Instance Set Descriptor */
+static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_psfp_gate *sgi,
+ u8 enable)
+{
+ struct enetc_cbd cbd = { .cmd = 0 };
+ struct sgi_table *sgi_config;
+ struct sgcl_conf *sgcl_config;
+ struct sgcl_data *sgcl_data;
+ struct sgce *sgce;
+ dma_addr_t dma;
+ u16 data_size;
+ int err, i;
+ u64 now;
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 0;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0x80;
+
+ /* disable */
+ if (!enable)
+ return enetc_send_cmd(priv->si, &cbd);
+
+ if (!sgi->num_entries)
+ return 0;
+
+ if (sgi->num_entries > priv->psfp_cap.max_psfp_gatelist ||
+ !sgi->cycletime)
+ return -EINVAL;
+
+ /* enable */
+ sgi_config = &cbd.sgi_table;
+
+ /* Keep open before gate list start */
+ sgi_config->ocgtst = 0x80;
+
+ sgi_config->oipv = (sgi->init_ipv < 0) ?
+ 0x0 : ((sgi->init_ipv & 0x7) | 0x8);
+
+ sgi_config->en = 0x80;
+
+ /* Basic config */
+ err = enetc_send_cmd(priv->si, &cbd);
+ if (err)
+ return -EINVAL;
+
+ memset(&cbd, 0, sizeof(cbd));
+
+ cbd.index = cpu_to_le16(sgi->index);
+ cbd.cmd = 1;
+ cbd.cls = BDCR_CMD_STREAM_GCL;
+ cbd.status_flags = 0;
+
+ sgcl_config = &cbd.sgcl_conf;
+
+ sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
+
+ data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
+
+ sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!sgcl_data)
+ return -ENOMEM;
+
+ cbd.length = cpu_to_le16(data_size);
+
+ dma = dma_map_single(&priv->si->pdev->dev,
+ sgcl_data, data_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
+ netdev_err(priv->si->ndev, "DMA mapping failed!\n");
+ kfree(sgcl_data);
+ return -ENOMEM;
+ }
+
+ cbd.addr[0] = lower_32_bits(dma);
+ cbd.addr[1] = upper_32_bits(dma);
+
+ sgce = &sgcl_data->sgcl[0];
+
+ sgcl_config->agtst = 0x80;
+
+ sgcl_data->ct = cpu_to_le32(sgi->cycletime);
+ sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
+
+ if (sgi->init_ipv >= 0)
+ sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
+
+ for (i = 0; i < sgi->num_entries; i++) {
+ struct action_gate_entry *from = &sgi->entries[i];
+ struct sgce *to = &sgce[i];
+
+ if (from->gate_state)
+ to->multi |= 0x10;
+
+ if (from->ipv >= 0)
+ to->multi |= ((from->ipv & 0x7) << 5) | 0x08;
+
+ if (from->maxoctets >= 0) {
+ to->multi |= 0x01;
+ to->msdu[0] = from->maxoctets & 0xFF;
+ to->msdu[1] = (from->maxoctets >> 8) & 0xFF;
+ to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
+ }
+
+ to->interval = cpu_to_le32(from->interval);
+ }
+
+ /* If basetime is less than now, calculate start time */
+ now = get_ptp_now(&priv->si->hw);
+
+ if (sgi->basetime < now) {
+ u64 start;
+
+ err = get_start_ns(now, sgi->cycletime, &start);
+ if (err)
+ goto exit;
+ sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
+ sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
+ } else {
+ u32 hi, lo;
+
+ hi = upper_32_bits(sgi->basetime);
+ lo = lower_32_bits(sgi->basetime);
+ sgcl_data->bth = cpu_to_le32(hi);
+ sgcl_data->btl = cpu_to_le32(lo);
+ }
+
+ err = enetc_send_cmd(priv->si, &cbd);
+
+exit:
+ kfree(sgcl_data);
+
+ return err;
+}
+
+static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
+{
+ struct enetc_stream_filter *f;
+
+ hlist_for_each_entry(f, &epsfp.stream_list, node)
+ if (f->sid.index == index)
+ return f;
+
+ return NULL;
+}
+
+static struct enetc_psfp_gate *enetc_get_gate_by_index(u32 index)
+{
+ struct enetc_psfp_gate *g;
+
+ hlist_for_each_entry(g, &epsfp.psfp_gate_list, node)
+ if (g->index == index)
+ return g;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->index == index)
+ return s;
+
+ return NULL;
+}
+
+static struct enetc_psfp_filter
+ *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
+{
+ struct enetc_psfp_filter *s;
+
+ hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
+ if (s->gate_id == sfi->gate_id &&
+ s->prio == sfi->prio &&
+ s->meter_id == sfi->meter_id)
+ return s;
+
+ return NULL;
+}
+
+static int enetc_get_free_index(struct enetc_ndev_priv *priv)
+{
+ u32 max_size = priv->psfp_cap.max_psfp_filter;
+ unsigned long index;
+
+ index = find_first_zero_bit(epsfp.psfp_sfi_bitmap, max_size);
+ if (index == max_size)
+ return -1;
+
+ return index;
+}
+
+static void stream_filter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_filter *sfi;
+ u8 z;
+
+ sfi = enetc_get_filter_by_index(index);
+ WARN_ON(!sfi);
+ z = refcount_dec_and_test(&sfi->refcount);
+
+ if (z) {
+ enetc_streamfilter_hw_set(priv, sfi, false);
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ clear_bit(index, epsfp.psfp_sfi_bitmap);
+ }
+}
+
+static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+ struct enetc_psfp_gate *sgi;
+ u8 z;
+
+ sgi = enetc_get_gate_by_index(index);
+ WARN_ON(!sgi);
+ z = refcount_dec_and_test(&sgi->refcount);
+ if (z) {
+ enetc_streamgate_hw_set(priv, sgi, false);
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void remove_one_chain(struct enetc_ndev_priv *priv,
+ struct enetc_stream_filter *filter)
+{
+ stream_gate_unref(priv, filter->sgi_index);
+ stream_filter_unref(priv, filter->sfi_index);
+
+ hlist_del(&filter->node);
+ kfree(filter);
+}
+
+static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
+ struct enetc_streamid *sid,
+ struct enetc_psfp_filter *sfi,
+ struct enetc_psfp_gate *sgi)
+{
+ int err;
+
+ err = enetc_streamid_hw_set(priv, sid, true);
+ if (err)
+ return err;
+
+ if (sfi) {
+ err = enetc_streamfilter_hw_set(priv, sfi, true);
+ if (err)
+ goto revert_sid;
+ }
+
+ err = enetc_streamgate_hw_set(priv, sgi, true);
+ if (err)
+ goto revert_sfi;
+
+ return 0;
+
+revert_sfi:
+ if (sfi)
+ enetc_streamfilter_hw_set(priv, sfi, false);
+revert_sid:
+ enetc_streamid_hw_set(priv, sid, false);
+ return err;
+}
+
+static struct actions_fwd *enetc_check_flow_actions(u64 acts,
+ unsigned int inputkeys)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_act_fwd); i++)
+ if (acts == enetc_act_fwd[i].actions &&
+ inputkeys & enetc_act_fwd[i].keys)
+ return &enetc_act_fwd[i];
+
+ return NULL;
+}
+
+static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct enetc_stream_filter *filter, *old_filter;
+ struct enetc_psfp_filter *sfi, *old_sfi;
+ struct enetc_psfp_gate *sgi, *old_sgi;
+ struct flow_action_entry *entry;
+ struct action_gate_entry *e;
+ u8 sfi_overwrite = 0;
+ int entries_size;
+ int i, err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identity resource!");
+ return -ENOSPC;
+ }
+
+ flow_action_for_each(i, entry, &rule->action)
+ if (entry->id == FLOW_ACTION_GATE)
+ break;
+
+ if (entry->id != FLOW_ACTION_GATE)
+ return -EINVAL;
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->sid.index = f->common.chain_index;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->dst) &&
+ !is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot match on both source and destination MAC");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on destination MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.dst_mac, match.key->dst);
+ filter->sid.filtertype = STREAMID_TYPE_NULL;
+ }
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on source MAC not supported");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ ether_addr_copy(filter->sid.src_mac, match.key->src);
+ filter->sid.filtertype = STREAMID_TYPE_SMAC;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority !=
+ (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
+ err = -EINVAL;
+ goto free_filter;
+ }
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN id");
+ err = -EINVAL;
+ goto free_filter;
+ }
+
+ filter->sid.vid = match.key->vlan_id;
+ if (!filter->sid.vid)
+ filter->sid.tagged = STREAMID_VLAN_UNTAGGED;
+ else
+ filter->sid.tagged = STREAMID_VLAN_TAGGED;
+ }
+ } else {
+ filter->sid.tagged = STREAMID_VLAN_ALL;
+ }
+
+ /* parsing gate action */
+ if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
+ err = -ENOSPC;
+ goto free_filter;
+ }
+
+ entries_size = struct_size(sgi, entries, entry->gate.num_entries);
+ sgi = kzalloc(entries_size, GFP_KERNEL);
+ if (!sgi) {
+ err = -ENOMEM;
+ goto free_filter;
+ }
+
+ refcount_set(&sgi->refcount, 1);
+ sgi->index = entry->gate.index;
+ sgi->init_ipv = entry->gate.prio;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+
+ e = sgi->entries;
+ for (i = 0; i < entry->gate.num_entries; i++) {
+ e[i].gate_state = entry->gate.entries[i].gate_state;
+ e[i].interval = entry->gate.entries[i].interval;
+ e[i].ipv = entry->gate.entries[i].ipv;
+ e[i].maxoctets = entry->gate.entries[i].maxoctets;
+ }
+
+ filter->sgi_index = sgi->index;
+
+ sfi = kzalloc(sizeof(*sfi), GFP_KERNEL);
+ if (!sfi) {
+ err = -ENOMEM;
+ goto free_gate;
+ }
+
+ refcount_set(&sfi->refcount, 1);
+ sfi->gate_id = sgi->index;
+
+ /* flow meter not supported yet */
+ sfi->meter_id = ENETC_PSFP_WILDCARD;
+
+ /* prio mirrors the filter's priority */
+ if (f->common.prio && f->common.prio <= BIT(3))
+ sfi->prio = f->common.prio - 1;
+ else
+ sfi->prio = ENETC_PSFP_WILDCARD;
+
+ old_sfi = enetc_psfp_check_sfi(sfi);
+ if (!old_sfi) {
+ int index;
+
+ index = enetc_get_free_index(priv);
+ if (index < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ err = -ENOSPC;
+ goto free_sfi;
+ }
+
+ sfi->index = index;
+ sfi->handle = index + HANDLE_OFFSET;
+ /* Update the stream filter handle also */
+ filter->sid.handle = sfi->handle;
+ filter->sfi_index = sfi->index;
+ sfi_overwrite = 0;
+ } else {
+ filter->sfi_index = old_sfi->index;
+ filter->sid.handle = old_sfi->handle;
+ sfi_overwrite = 1;
+ }
+
+ err = enetc_psfp_hw_set(priv, &filter->sid,
+ sfi_overwrite ? NULL : sfi, sgi);
+ if (err)
+ goto free_sfi;
+
+ spin_lock(&epsfp.psfp_lock);
+ /* Remove the old node if it exists and replace it with the new one */
+ old_sgi = enetc_get_gate_by_index(filter->sgi_index);
+ if (old_sgi) {
+ refcount_set(&sgi->refcount,
+ refcount_read(&old_sgi->refcount) + 1);
+ hlist_del(&old_sgi->node);
+ kfree(old_sgi);
+ }
+
+ hlist_add_head(&sgi->node, &epsfp.psfp_gate_list);
+
+ if (!old_sfi) {
+ hlist_add_head(&sfi->node, &epsfp.psfp_filter_list);
+ set_bit(sfi->index, epsfp.psfp_sfi_bitmap);
+ } else {
+ kfree(sfi);
+ refcount_inc(&old_sfi->refcount);
+ }
+
+ old_filter = enetc_get_stream_by_index(filter->sid.index);
+ if (old_filter)
+ remove_one_chain(priv, old_filter);
+
+ filter->stats.lastused = jiffies;
+ hlist_add_head(&filter->node, &epsfp.stream_list);
+
+ spin_unlock(&epsfp.psfp_lock);
+
+ return 0;
+
+free_sfi:
+ kfree(sfi);
+free_gate:
+ kfree(sgi);
+free_filter:
+ kfree(filter);
+
+ return err;
+}
+
+static int enetc_config_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action *action = &rule->action;
+ struct flow_action_entry *entry;
+ struct actions_fwd *fwd;
+ u64 actions = 0;
+ int i, err;
+
+ if (!flow_action_has_entries(action)) {
+ NL_SET_ERR_MSG_MOD(extack, "At least one action is needed");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, entry, action)
+ actions |= BIT(entry->id);
+
+ fwd = enetc_check_flow_actions(actions, dissector->used_keys);
+ if (!fwd) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported filter type!");
+ return -EOPNOTSUPP;
+ }
+
+ if (fwd->output & FILTER_ACTION_TYPE_PSFP) {
+ err = enetc_psfp_parse_clsflower(priv, cls_flower);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid PSFP inputs");
+ return err;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported actions");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int enetc_psfp_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct enetc_stream_filter *filter;
+ struct netlink_ext_ack *extack = f->common.extack;
+ int err;
+
+ if (f->common.chain_index >= priv->psfp_cap.max_streamid) {
+ NL_SET_ERR_MSG_MOD(extack, "No Stream identity resource!");
+ return -ENOSPC;
+ }
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamid_hw_set(priv, &filter->sid, false);
+ if (err)
+ return err;
+
+ remove_one_chain(priv, filter);
+
+ return 0;
+}
+
+static int enetc_destroy_clsflower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ return enetc_psfp_destroy_clsflower(priv, f);
+}
+
+static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *f)
+{
+ struct psfp_streamfilter_counters counters = {};
+ struct enetc_stream_filter *filter;
+ struct flow_stats stats = {};
+ int err;
+
+ filter = enetc_get_stream_by_index(f->common.chain_index);
+ if (!filter)
+ return -EINVAL;
+
+ err = enetc_streamcounter_hw_get(priv, filter->sfi_index, &counters);
+ if (err)
+ return -EINVAL;
+
+ spin_lock(&epsfp.psfp_lock);
+ stats.pkts = counters.matching_frames_count - filter->stats.pkts;
+ stats.lastused = filter->stats.lastused;
+ filter->stats.pkts += stats.pkts;
+ spin_unlock(&epsfp.psfp_lock);
+
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
+
+static int enetc_setup_tc_cls_flower(struct enetc_ndev_priv *priv,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return enetc_config_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return enetc_destroy_clsflower(priv, cls_flower);
+ case FLOW_CLS_STATS:
+ return enetc_psfp_get_stats(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline void clean_psfp_sfi_bitmap(void)
+{
+ bitmap_free(epsfp.psfp_sfi_bitmap);
+ epsfp.psfp_sfi_bitmap = NULL;
+}
+
+static void clean_stream_list(void)
+{
+ struct enetc_stream_filter *s;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(s, tmp, &epsfp.stream_list, node) {
+ hlist_del(&s->node);
+ kfree(s);
+ }
+}
+
+static void clean_sfi_list(void)
+{
+ struct enetc_psfp_filter *sfi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sfi, tmp, &epsfp.psfp_filter_list, node) {
+ hlist_del(&sfi->node);
+ kfree(sfi);
+ }
+}
+
+static void clean_sgi_list(void)
+{
+ struct enetc_psfp_gate *sgi;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(sgi, tmp, &epsfp.psfp_gate_list, node) {
+ hlist_del(&sgi->node);
+ kfree(sgi);
+ }
+}
+
+static void clean_psfp_all(void)
+{
+ /* Disable all list nodes and free all memory */
+ clean_sfi_list();
+ clean_sgi_list();
+ clean_stream_list();
+ epsfp.dev_bitmap = 0;
+ clean_psfp_sfi_bitmap();
+}
+
+int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct net_device *ndev = cb_priv;
+
+ if (!tc_can_offload(ndev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return enetc_setup_tc_cls_flower(netdev_priv(ndev), type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int enetc_psfp_init(struct enetc_ndev_priv *priv)
+{
+ if (epsfp.psfp_sfi_bitmap)
+ return 0;
+
+ epsfp.psfp_sfi_bitmap = bitmap_zalloc(priv->psfp_cap.max_psfp_filter,
+ GFP_KERNEL);
+ if (!epsfp.psfp_sfi_bitmap)
+ return -ENOMEM;
+
+ spin_lock_init(&epsfp.psfp_lock);
+
+ if (list_empty(&enetc_block_cb_list))
+ epsfp.dev_bitmap = 0;
+
+ return 0;
+}
+
+int enetc_psfp_clean(struct enetc_ndev_priv *priv)
+{
+ if (!list_empty(&enetc_block_cb_list))
+ return -EBUSY;
+
+ clean_psfp_all();
+
+ return 0;
+}
+
+int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct flow_block_offload *f = type_data;
+ int err;
+
+ err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
+ enetc_setup_tc_block_cb,
+ ndev, ndev, true);
+ if (err)
+ return err;
+
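+ /* Track bound ports in dev_bitmap; all PSFP state is freed once the last port unbinds */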
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ if (!epsfp.dev_bitmap)
+ clean_psfp_all();
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index e74dd1f86bba..a6cdd5b61921 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -376,8 +376,7 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_NAPI_IMASK FEC_ENET_MII
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
@@ -543,7 +542,6 @@ struct fec_enet_private {
int link;
int full_duplex;
int speed;
- struct completion mdio_done;
int irq[FEC_IRQ_NUM];
bool bufdesc_ex;
int pause_flag;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dc6f8763a5d4..2d0d313ee7c5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -88,8 +88,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
struct fec_devinfo {
u32 quirks;
- u8 stop_gpr_reg;
- u8 stop_gpr_bit;
};
static const struct fec_devinfo fec_imx25_info = {
@@ -112,8 +110,6 @@ static const struct fec_devinfo fec_imx6q_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
FEC_QUIRK_HAS_RACC,
- .stop_gpr_reg = 0x34,
- .stop_gpr_bit = 27,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -976,8 +972,8 @@ fec_restart(struct net_device *ndev)
writel((__force u32)cpu_to_be32(temp_mac[1]),
fep->hwp + FEC_ADDR_HIGH);
- /* Clear any outstanding interrupt. */
- writel(0xffffffff, fep->hwp + FEC_IEVENT);
+ /* Clear any outstanding interrupt, except MDIO. */
+ writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
fec_enet_bd_init(ndev);
@@ -1123,7 +1119,7 @@ fec_restart(struct net_device *ndev)
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
else
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
fec_enet_itr_coal_init(ndev);
@@ -1652,6 +1648,10 @@ fec_enet_interrupt(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int_events = readl(fep->hwp + FEC_IEVENT);
+
+ /* Don't clear MDIO events; we poll for those */
+ int_events &= ~FEC_ENET_MII;
+
writel(int_events, fep->hwp + FEC_IEVENT);
fec_enet_collect_events(fep, int_events);
@@ -1659,16 +1659,12 @@ fec_enet_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
- /* Disable the NAPI interrupts */
- writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
+ /* Disable interrupts */
+ writel(0, fep->hwp + FEC_IMASK);
__napi_schedule(&fep->napi);
}
}
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
- }
return ret;
}
@@ -1818,11 +1814,24 @@ static void fec_enet_adjust_link(struct net_device *ndev)
phy_print_status(phy_dev);
}
+static int fec_enet_mdio_wait(struct fec_enet_private *fep)
+{
+ uint ievent;
+ int ret;
+
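+ /* Poll IEVENT for the MII event: 2 us interval, 30 ms timeout */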
+ ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
+ ievent & FEC_ENET_MII, 2, 30000);
+
+ if (!ret)
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ return ret;
+}
+
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1830,8 +1839,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret < 0)
return ret;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1843,11 +1850,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1866,11 +1871,9 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO read timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
@@ -1888,7 +1891,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);
@@ -1898,8 +1900,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
else
ret = 0;
- reinit_completion(&fep->mdio_done);
-
if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -1911,11 +1911,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
netdev_err(fep->netdev, "MDIO address write timeout\n");
- ret = -ETIMEDOUT;
goto out;
}
} else {
@@ -1931,12 +1929,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
- time_left = wait_for_completion_timeout(&fep->mdio_done,
- usecs_to_jiffies(FEC_MII_TIMEOUT));
- if (time_left == 0) {
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
netdev_err(fep->netdev, "MDIO write timeout\n");
- ret = -ETIMEDOUT;
- }
out:
pm_runtime_mark_last_busy(dev);
@@ -1986,8 +1981,12 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
return 0;
failed_clk_ref:
- if (fep->clk_ref)
- clk_disable_unprepare(fep->clk_ref);
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
@@ -2065,9 +2064,11 @@ static int fec_enet_mii_init(struct platform_device *pdev)
static struct mii_bus *fec0_mii_bus;
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ bool suppress_preamble = false;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
+ u32 bus_freq;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2095,15 +2096,23 @@ static int fec_enet_mii_init(struct platform_device *pdev)
return -ENOENT;
}
+ bus_freq = 2500000; /* 2.5 MHz by default */
+ node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (node) {
+ of_property_read_u32(node, "clock-frequency", &bus_freq);
+ suppress_preamble = of_property_read_bool(node,
+ "suppress-preamble");
+ }
+
/*
- * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ * Set MII speed (= clk_get_rate() / 2 * phy_speed)
*
* The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
* for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
mii_speed--;
if (mii_speed > 63) {
@@ -2130,8 +2139,24 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->phy_speed = mii_speed << 1 | holdtime << 8;
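+ /* MSCR bit 7 disables the MDIO frame preamble */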
+ if (suppress_preamble)
+ fep->phy_speed |= BIT(7);
+
+ /* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ /* Clear any pending transaction complete indication */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
fep->mii_bus = mdiobus_alloc();
if (fep->mii_bus == NULL) {
err = -ENOMEM;
@@ -2146,7 +2171,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
- node = of_get_child_by_name(pdev->dev.of_node, "mdio");
err = of_mdiobus_register(fep->mii_bus, node);
of_node_put(node);
if (err)
@@ -3452,19 +3476,23 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
}
static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
- struct fec_devinfo *dev_info,
struct device_node *np)
{
struct device_node *gpr_np;
+ u32 out_val[3];
int ret = 0;
- if (!dev_info)
- return 0;
-
- gpr_np = of_parse_phandle(np, "gpr", 0);
+ gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
if (!gpr_np)
return 0;
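+ /* "fsl,stop-mode" cells are <&gpr register-offset bit>; cells 1 and 2 are used below */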
+ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
+ ARRAY_SIZE(out_val));
+ if (ret) {
+ dev_dbg(&fep->pdev->dev, "no stop mode property\n");
+ return ret;
+ }
+
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
if (IS_ERR(fep->stop_gpr.gpr)) {
dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
@@ -3473,8 +3501,8 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
goto out;
}
- fep->stop_gpr.reg = dev_info->stop_gpr_reg;
- fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+ fep->stop_gpr.reg = out_val[1];
+ fep->stop_gpr.bit = out_val[2];
out:
of_node_put(gpr_np);
@@ -3551,7 +3579,7 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
- ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ ret = fec_enet_init_stop_mode(fep, np);
if (ret)
goto failed_stop_mode;
@@ -3674,7 +3702,6 @@ fec_probe(struct platform_device *pdev)
fep->irq[i] = irq;
}
- init_completion(&fep->mdio_done);
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8aace2de0cc9..9a907947ba19 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -697,9 +697,9 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return rc;
if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
- is_c45 = 1;
+ is_c45 = true;
else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
- is_c45 = 0;
+ is_c45 = false;
else
return -ENODATA;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 948e67ef30fd..1ffe8fac702d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -24,7 +24,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
+ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
+enum hclge_mbx_tbl_cfg_subcode {
+ HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 5587605d6deb..d041cac9a487 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -145,7 +145,6 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
- HNAE3_RESTORE_CLIENT,
};
enum hnae3_hw_error_type {
@@ -233,7 +232,6 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
- enum hnae3_reset_type reset_type;
void *priv;
};
@@ -270,6 +268,8 @@ struct hnae3_ae_dev {
* Set loopback
* set_promisc_mode
* Set promisc mode
+ * request_update_promisc_mode
+ * request hclge (vf) to update the promisc mode
* set_mtu()
* set mtu
* get_pauseparam()
@@ -354,8 +354,6 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
- * restore_vlan_table()
- * Restore vlan filter entries after reset
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
@@ -375,6 +373,8 @@ struct hnae3_ae_dev {
* Set the max tx rate of specified vf.
* set_vf_mac
* Configure the default MAC for specified VF
+ * get_module_eeprom
+ * Get the optical module eeprom info.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -408,6 +408,7 @@ struct hnae3_ae_ops {
int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
+ void (*request_update_promisc_mode)(struct hnae3_handle *handle);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -525,7 +526,6 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd);
int (*get_fd_all_rules)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd, u32 *rule_locs);
- int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
@@ -539,7 +539,6 @@ struct hnae3_ae_ops {
void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
- void (*restore_vlan_table)(struct hnae3_handle *handle);
int (*get_vf_config)(struct hnae3_handle *handle, int vf,
struct ifla_vf_info *ivf);
int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
@@ -550,6 +549,9 @@ struct hnae3_ae_ops {
int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
int min_tx_rate, int max_tx_rate, bool force);
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
+ int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
};
struct hnae3_dcb_ops {
@@ -619,16 +621,6 @@ struct hnae3_roce_private_info {
unsigned long state;
};
-struct hnae3_unic_private_info {
- struct net_device *netdev;
- u16 rx_buf_len;
- u16 num_tx_desc;
- u16 num_rx_desc;
-
- u16 num_tqps; /* total number of tqps in this handle */
- struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
-};
-
#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
@@ -654,7 +646,6 @@ struct hnae3_handle {
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
- struct hnae3_unic_private_info uinfo;
struct hnae3_roce_private_info rinfo;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index e1d88095a77e..fe7fb565da19 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
dev_info(&h->pdev->dev, "dump loopback\n");
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
+ dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
@@ -270,7 +272,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
" [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index da98fd7c8eca..b14f2abc2425 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,7 +15,6 @@
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
-#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
@@ -41,10 +40,8 @@
} while (0)
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
-static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
-const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
@@ -550,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ /* need to ignore the request to remove the device address, because
+ * we store the device address and other uc list addresses
+ * in the function's mac filter list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
if (h->ae_algo->ops->rm_uc_addr)
return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -597,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u8 new_flags;
- int ret;
new_flags = hns3_get_netdev_flags(netdev);
- ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
- if (ret) {
- netdev_err(netdev, "sync uc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_UPE;
- }
-
- if (netdev->flags & IFF_MULTICAST) {
- ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
- hns3_nic_mc_unsync);
- if (ret) {
- netdev_err(netdev, "sync mc address fail\n");
- if (ret == -ENOSPC)
- new_flags |= HNAE3_OVERFLOW_MPE;
- }
- }
+ __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
/* User mode Promisc mode enable and vlan filtering is disabled to
- * let all packets in. MAC-VLAN Table overflow Promisc enabled and
- * vlan fitering is enabled
+ * let all packets in.
*/
- hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
h->netdev_flags = new_flags;
- hns3_update_promisc_mode(netdev, new_flags);
+ hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->request_update_promisc_mode)
+ ops->request_update_promisc_mode(handle);
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -1450,9 +1445,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
- if (!skb_has_frag_list(skb))
- goto out;
-
skb_walk_frags(skb, frag_skb) {
ret = hns3_fill_skb_to_desc(ring, frag_skb,
DESC_TYPE_FRAGLIST_SKB);
@@ -1461,7 +1453,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
bd_num += ret;
}
-out:
+
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@@ -1552,12 +1544,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
- if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- h->ae_algo->ops->enable_vlan_filter) {
- enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
- h->ae_algo->ops->enable_vlan_filter(h, enable);
- }
-
if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
h->ae_algo->ops->enable_hw_strip_rxvtag) {
enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
@@ -2107,7 +2093,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
@@ -3909,9 +3894,11 @@ static int hns3_init_mac_addr(struct net_device *netdev)
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
- } else {
+ } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
ether_addr_copy(netdev->dev_addr, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+ } else {
+ return 0;
}
if (h->ae_algo->ops->set_mac_addr)
@@ -3939,17 +3926,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
h->ae_algo->ops->mac_disconnect_phy(h);
}
-static int hns3_restore_fd_rules(struct net_device *netdev)
-{
- struct hnae3_handle *h = hns3_get_handle(netdev);
- int ret = 0;
-
- if (h->ae_algo->ops->restore_fd_rules)
- ret = h->ae_algo->ops->restore_fd_rules(h);
-
- return ret;
-}
-
static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -4121,8 +4097,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_remove_hw_addr(netdev);
-
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4193,56 +4167,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
return hns3_nic_set_real_num_queue(ndev);
}
-static int hns3_recover_hw_addr(struct net_device *ndev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
- int ret = 0;
-
- netif_addr_lock_bh(ndev);
- /* go through and sync uc_addr entries to the device */
- list = &ndev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_uc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
- /* go through and sync mc_addr entries to the device */
- list = &ndev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list) {
- ret = hns3_nic_mc_sync(ndev, ha->addr);
- if (ret)
- goto out;
- }
-
-out:
- netif_addr_unlock_bh(ndev);
- return ret;
-}
-
-static void hns3_remove_hw_addr(struct net_device *netdev)
-{
- struct netdev_hw_addr_list *list;
- struct netdev_hw_addr *ha, *tmp;
-
- hns3_nic_uc_unsync(netdev, netdev->dev_addr);
-
- netif_addr_lock_bh(netdev);
- /* go through and unsync uc_addr entries to the device */
- list = &netdev->uc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- hns3_nic_uc_unsync(netdev, ha->addr);
-
- /* go through and unsync mc_addr entries to the device */
- list = &netdev->mc;
- list_for_each_entry_safe(ha, tmp, &list->list, list)
- if (ha->refcount > 1)
- hns3_nic_mc_unsync(netdev, ha->addr);
-
- netif_addr_unlock_bh(netdev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4401,7 +4325,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -4409,15 +4332,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(ndev);
- hns3_del_all_fd_rules(ndev, false);
- }
-
if (!netif_running(ndev))
return 0;
@@ -4484,6 +4398,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
goto err_init_irq_fail;
}
+ if (!hns3_is_phys_func(handle->pdev))
+ hns3_init_mac_addr(netdev);
+
ret = hns3_client_start(handle);
if (ret) {
dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4509,33 +4426,6 @@ err_put_ring:
return ret;
}
-static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
-{
- struct net_device *netdev = handle->kinfo.netdev;
- bool vlan_filter_enable;
- int ret;
-
- ret = hns3_init_mac_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_recover_hw_addr(netdev);
- if (ret)
- return ret;
-
- ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
- if (ret)
- return ret;
-
- vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
- if (handle->ae_algo->ops->restore_vlan_table)
- handle->ae_algo->ops->restore_vlan_table(handle);
-
- return hns3_restore_fd_rules(netdev);
-}
-
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
@@ -4585,9 +4475,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
case HNAE3_UNINIT_CLIENT:
ret = hns3_reset_notify_uninit_enet(handle);
break;
- case HNAE3_RESTORE_CLIENT:
- ret = hns3_reset_notify_restore_enet(handle);
- break;
default:
break;
}
@@ -4765,4 +4652,3 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
-MODULE_VERSION(HNS3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index abefd7a179f7..66cd4395f781 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,10 +8,6 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.0"
-
-extern const char hns3_driver_version[];
-
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -50,23 +46,6 @@ enum hns3_nic_state {
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-#define HNS3_RING_T0_BE_RST 0x00094
-#define HNS3_RING_COULD_BE_RST 0x00098
-#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
-
-#define HNS3_RING_INTMSK_RXWL_REG 0x000A0
-#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4
-#define HNS3_RX_RING_INT_STS_REG 0x000A8
-#define HNS3_RING_INTMSK_TXWL_REG 0x000AC
-#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0
-#define HNS3_TX_RING_INT_STS_REG 0x000B4
-#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8
-#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC
-#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4
-#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8
-
-#define HNS3_RING_MB_CTRL_REG 0x00100
-#define HNS3_RING_MB_DATA_BASE_REG 0x00200
#define HNS3_TX_REG_OFFSET 0x40
@@ -490,21 +469,8 @@ struct hns3_enet_tqp_vector {
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;
-enum hns3_udp_tnl_type {
- HNS3_UDP_TNL_VXLAN,
- HNS3_UDP_TNL_GENEVE,
- HNS3_UDP_TNL_MAX,
-};
-
-struct hns3_udp_tunnel {
- u16 dst_port;
- int used;
-};
-
struct hns3_nic_priv {
struct hnae3_handle *ae_handle;
- u32 enet_ver;
- u32 port_id;
struct net_device *netdev;
struct device *dev;
@@ -516,19 +482,10 @@ struct hns3_nic_priv {
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
- /* The most recently read link state */
- int link;
u64 tx_timeout_count;
unsigned long state;
- struct timer_list service_timer;
-
- struct work_struct service_task;
-
- struct notifier_block notifier_block;
- /* Vxlan/Geneve information */
- struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
@@ -580,15 +537,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
-static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
-{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_FLR_RESET ||
- ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
- ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
- ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
-}
-
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
@@ -662,6 +610,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 28b81f24afa1..6b1545f982aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
#include "hns3_enet.h"
@@ -12,6 +13,11 @@ struct hns3_stats {
int stats_offset;
};
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
@@ -99,7 +105,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
h->ae_algo->ops->set_promisc_mode(h, true, true);
} else {
/* recover promisc mode before loopback test */
- hns3_update_promisc_mode(ndev, h->netdev_flags);
+ hns3_request_update_promisc_mode(h);
vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(ndev, vlan_filter_enable);
}
@@ -546,10 +552,6 @@ static void hns3_get_drvinfo(struct net_device *netdev,
return;
}
- strncpy(drvinfo->version, hns3_driver_version,
- sizeof(drvinfo->version));
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
-
strncpy(drvinfo->driver, h->pdev->driver->name,
sizeof(drvinfo->driver));
drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
@@ -771,8 +773,13 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
/* Only support ksettings_set for netdev with phy attached for now */
- if (netdev->phydev)
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
if (handle->pdev->revision == 0x20)
return -EOPNOTSUPP;
@@ -1390,6 +1397,73 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
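+ /* The first two EEPROM bytes identify the module type and extended type */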
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
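+ /* Zero the buffer first so unread bytes are returned as zeros */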
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
@@ -1453,6 +1527,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 0fb61d440d3b..6c28c8f6292c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 7f509eff562e..1d6c328bd9fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -11,8 +11,6 @@
#include "hnae3.h"
#include "hclge_main.h"
-#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -426,6 +424,9 @@ int hclge_cmd_init(struct hclge_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if ((hclge_is_reset_pending(hdev))) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init cmd since reset %#lx pending\n",
+ hdev->reset_pending);
ret = -EBUSY;
goto err_cmd_init;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96498d9b4754..463f29151ef0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -184,11 +184,11 @@ enum hclge_opcode_type {
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
@@ -270,6 +270,8 @@ enum hclge_opcode_type {
HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
@@ -733,31 +735,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-struct hclge_mac_vlan_add_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
-#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove_cmd {
- __le16 flags;
- __le16 mac_addr_hi16;
- __le32 mac_addr_lo32;
- __le32 mac_addr_msk_hi32;
- __le16 mac_addr_msk_lo16;
- __le16 vlan_tag;
- __le16 ingress_port;
- __le16 egress_port;
- u8 rsv[4];
-};
-
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
@@ -907,8 +884,8 @@ struct hclge_cfg_tso_status_cmd {
#define HCLGE_GRO_EN_B 0
struct hclge_cfg_gro_status_cmd {
- __le16 gro_en;
- u8 rsv[22];
+ u8 gro_en;
+ u8 rsv[23];
};
#define HCLGE_TSO_MSS_MIN 256
@@ -1079,6 +1056,19 @@ struct hclge_firmware_compat_cmd {
u8 rsv[20];
};
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 17228288d4df..26f6f068b01d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -143,7 +143,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src)
return;
@@ -173,6 +173,114 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
kfree(desc_src);
}
+static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+
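+ /* Each flag below is a single bit of the MAC mode configuration word */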
+ dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
+ dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+}
+
+static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
+}
+
+static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ dev_info(&hdev->pdev->dev, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
+}
+
+static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
+{
+ hclge_dbg_dump_mac_enable_status(hdev);
+
+ hclge_dbg_dump_mac_frame_size(hdev);
+
+ hclge_dbg_dump_mac_speed_duplex(hdev);
+}
+
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
@@ -304,6 +412,11 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
+ if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
+ hclge_dbg_dump_mac(hdev);
+ has_dump = true;
+ }
+
if (strncmp(cmd_buf, "dcb", 3) == 0) {
hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
has_dump = true;
@@ -578,7 +691,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int queue_id, group_id;
- u32 qset_maping[32];
+ u32 qset_mapping[32];
int tc_id, qset_id;
int pri_id, ret;
u32 i;
@@ -633,7 +746,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] =
+ qset_mapping[group_id] =
le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
@@ -643,11 +756,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
for (group_id = 0; group_id < 4; group_id++) {
dev_info(&hdev->pdev->dev,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
- group_id * 256, qset_maping[(u32)(i + 7)],
- qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
- qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
- qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
- qset_maping[i]);
+ group_id * 256, qset_mapping[(u32)(i + 7)],
+ qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
+ qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
+ qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ qset_mapping[i]);
i += 8;
}
@@ -1145,6 +1258,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_PARAM_NUM 2
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1154,13 +1268,17 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
int ret;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
- if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
- length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
- dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Too few parameters, num = %d.\n", ret);
return;
}
- if (offset < 0 || length <= 0) {
- dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+
+ if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev,
+ "Invalid input, offset = %d, length = %d.\n",
+ offset, length);
return;
}
@@ -1328,6 +1446,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
hclge_dbg_dump_qs_shaper_single(hdev, qsid);
}
+static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
+ bool is_unicast)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int ret;
+
+ ret = kstrtouint(cmd_buf, 0, &func_id);
+ if (ret < 0) {
+ dev_err(&hdev->pdev->dev,
+ "dump mac list: bad command string, ret = %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (func_id >= hdev->num_alloc_vport) {
+ dev_err(&hdev->pdev->dev,
+ "function id(%u) is out of range(0-%u)\n", func_id,
+ hdev->num_alloc_vport - 1);
+ return -EINVAL;
+ }
+
+ vport = &hdev->vport[func_id];
+
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+
+ dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
+ func_id, is_unicast ? "uc" : "mc");
+ dev_info(&hdev->pdev->dev, "mac address state\n");
+
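+ /* Hold the list lock: the periodic mac sync task may modify these lists */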
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ dev_info(&hdev->pdev->dev, "%pM %d\n",
+ mac_node->mac_addr, mac_node->state);
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1372,6 +1533,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
+ } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump uc mac list")],
+ true);
+ } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump mc mac list")],
+ false);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 876fd81ad2f1..608fe26fc3fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -16,7 +16,6 @@
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
-#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a758f9ae32be..96bfad52630d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -62,14 +62,16 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
-static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+
static struct hnae3_ae_algo ae_algo;
static struct workqueue_struct *hclge_wq;
@@ -550,7 +552,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -570,7 +572,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATUS,
+ HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -1361,10 +1363,8 @@ static int hclge_configure(struct hclge_dev *hdev)
int ret;
ret = hclge_get_cfg(hdev, &cfg);
- if (ret) {
- dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
+ if (ret)
return ret;
- }
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
@@ -1387,7 +1387,8 @@ static int hclge_configure(struct hclge_dev *hdev)
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
- dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
+ cfg.default_speed, ret);
return ret;
}
@@ -1429,26 +1430,17 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
-static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
- unsigned int tso_mss_max)
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
+ u16 tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
- u16 tso_mss;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
- req->tso_mss_min = cpu_to_le16(tso_mss);
-
- tso_mss = 0;
- hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
- req->tso_mss_max = cpu_to_le16(tso_mss);
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1465,7 +1457,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_gro_status_cmd *)desc.data;
- req->gro_en = cpu_to_le16(en ? 1 : 0);
+ req->gro_en = en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1687,6 +1679,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -2965,13 +2958,11 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
- msix_src_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2981,7 +2972,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*
* check for vector0 reset event sources
*/
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -2990,7 +2981,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
@@ -3480,7 +3471,7 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
+ HCLGE_MISC_VECTOR_INT_STS);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
@@ -3729,22 +3720,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}
static int hclge_reset_prepare(struct hclge_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
@@ -3780,11 +3762,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hclge_clear_reset_cause(hdev);
- ret = hclge_reset_prepare_up(hdev);
- if (ret)
- return ret;
-
-
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
@@ -3793,6 +3770,10 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
return ret;
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ return ret;
+
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
@@ -3806,7 +3787,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
- ae_dev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
/* if default_reset_request has a higher level reset request,
@@ -3973,6 +3953,8 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
* updated when it is triggered by mbx.
*/
hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -4722,7 +4704,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
- "Set promisc mode fail, status is %d.\n", ret);
+ "failed to set vport %d promisc mode, ret = %d.\n",
+ param->vf_id, ret);
return ret;
}
@@ -4772,6 +4755,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
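+ /* The periodic service task picks this flag up and applies the new mode */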
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
struct hclge_get_fd_mode_cmd *req;
@@ -4822,7 +4813,8 @@ static int hclge_get_fd_allocation(struct hclge_dev *hdev,
return ret;
}
-static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
{
struct hclge_set_fd_key_config_cmd *req;
struct hclge_fd_key_cfg *stage;
@@ -4876,9 +4868,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
return -EOPNOTSUPP;
}
- hdev->fd_cfg.proto_support =
- TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
- UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
@@ -4892,11 +4881,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If use max 400bit key, we can support tuples for ether type */
- if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
- hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
key_cfg->tuple_active |=
BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
- }
/* roce_type is used to filter roce frames
* dst_vport is used to specify the rule
@@ -5006,8 +4993,6 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
return true;
switch (tuple_bit) {
- case 0:
- return false;
case BIT(INNER_DST_MAC):
for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
@@ -5165,9 +5150,10 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
u8 *cur_key_x, *cur_key_y;
- unsigned int i;
- int ret, tuple_size;
u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
memset(key_x, 0, sizeof(key_x));
memset(key_y, 0, sizeof(key_y));
@@ -5244,172 +5230,255 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
-static int hclge_fd_check_spec(struct hclge_dev *hdev,
- struct ethtool_rx_flow_spec *fs, u32 *unused)
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
{
- struct ethtool_tcpip4_spec *tcp_ip4_spec;
- struct ethtool_usrip4_spec *usr_ip4_spec;
- struct ethtool_tcpip6_spec *tcp_ip6_spec;
- struct ethtool_usrip6_spec *usr_ip6_spec;
- struct ethhdr *ether_spec;
-
- if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ if (!spec || !unused_tuple)
return -EINVAL;
- if (!(fs->flow_type & hdev->fd_cfg.proto_support))
- return -EOPNOTSUPP;
-
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
- dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
- return -EOPNOTSUPP;
- }
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
- switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case SCTP_V4_FLOW:
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!tcp_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!tcp_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- if (!tcp_ip4_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!tcp_ip4_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (!tcp_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ return 0;
+}
- break;
- case IP_USER_FLOW:
- usr_ip4_spec = &fs->h_u.usr_ip4_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!usr_ip4_spec->ip4src)
- *unused |= BIT(INNER_SRC_IP);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- if (!usr_ip4_spec->ip4dst)
- *unused |= BIT(INNER_DST_IP);
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (!usr_ip4_spec->tos)
- *unused |= BIT(INNER_IP_TOS);
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (!usr_ip4_spec->proto)
- *unused |= BIT(INNER_IP_PROTO);
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
- if (usr_ip4_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
- return -EOPNOTSUPP;
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
- break;
- case SCTP_V6_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
- tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS);
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
- /* check whether src/dst ip address used */
- if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
- !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ return 0;
+}
- if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
- !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (!tcp_ip6_spec->psrc)
- *unused |= BIT(INNER_SRC_PORT);
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
- if (!tcp_ip6_spec->pdst)
- *unused |= BIT(INNER_DST_PORT);
+ /* check whether the src/dst ip addresses are used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (tcp_ip6_spec->tclass)
- return -EOPNOTSUPP;
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- break;
- case IPV6_USER_FLOW:
- usr_ip6_spec = &fs->h_u.usr_ip6_spec;
- *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
- BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
- BIT(INNER_DST_PORT);
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
- /* check whether src/dst ip address used */
- if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
- !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
- *unused |= BIT(INNER_SRC_IP);
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
- if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
- !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
- *unused |= BIT(INNER_DST_IP);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- if (!usr_ip6_spec->l4_proto)
- *unused |= BIT(INNER_IP_PROTO);
+ return 0;
+}
- if (usr_ip6_spec->tclass)
- return -EOPNOTSUPP;
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
- if (usr_ip6_spec->l4_4_bytes)
- return -EOPNOTSUPP;
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
- break;
- case ETHER_FLOW:
- ether_spec = &fs->h_u.ether_spec;
- *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
- BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
- BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+ /* check whether the src/dst ip addresses are used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
- if (is_zero_ether_addr(ether_spec->h_source))
- *unused |= BIT(INNER_SRC_MAC);
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
- if (is_zero_ether_addr(ether_spec->h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
- if (!ether_spec->h_proto)
- *unused |= BIT(INNER_ETH_TYPE);
+ if (spec->tclass)
+ return -EOPNOTSUPP;
- break;
- default:
+ if (spec->l4_4_bytes)
return -EOPNOTSUPP;
- }
- if ((fs->flow_type & FLOW_EXT)) {
- if (fs->h_ext.vlan_etype)
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
return -EOPNOTSUPP;
+ }
+
if (!fs->h_ext.vlan_tci)
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
- if (fs->m_ext.vlan_tci) {
- if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
- return -EINVAL;
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
}
} else {
- *unused |= BIT(INNER_VLAN_TAG_FST);
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
}
if (fs->flow_type & FLOW_MAC_EXT) {
- if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
return -EOPNOTSUPP;
+ }
if (is_zero_ether_addr(fs->h_ext.h_dest))
- *unused |= BIT(INNER_DST_MAC);
+ *unused_tuple |= BIT(INNER_DST_MAC);
else
- *unused &= ~(BIT(INNER_DST_MAC));
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
}
return 0;
}
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u.\n",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
+
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
struct hclge_fd_rule *rule = NULL;
@@ -5618,7 +5687,7 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
break;
}
- if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->flow_type & FLOW_EXT) {
rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
}
@@ -5673,22 +5742,23 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow director is not supported\n");
return -EOPNOTSUPP;
+ }
if (!hdev->fd_en) {
- dev_warn(&hdev->pdev->dev,
- "Please enable flow director first\n");
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
return -EOPNOTSUPP;
}
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
ret = hclge_fd_check_spec(hdev, fs, &unused);
- if (ret) {
- dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ if (ret)
return ret;
- }
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
action = HCLGE_FD_ACTION_DROP_PACKET;
@@ -5729,7 +5799,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
}
rule->flow_type = fs->flow_type;
-
rule->location = fs->location;
rule->unused_tuple = unused;
rule->vf_id = dst_vport_id;
@@ -5877,6 +5946,149 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
return 0;
}
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
+{
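+ /* for each tuple: report the value, and a zero mask when the tuple is unused */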
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+}
+
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -5909,162 +6121,34 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
case SCTP_V4_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
- fs->h_u.tcp_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip4_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip4_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
- fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.tcp_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
break;
case IP_USER_FLOW:
- fs->h_u.usr_ip4_spec.ip4src =
- cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
- fs->m_u.tcp_ip4_spec.ip4src =
- rule->unused_tuple & BIT(INNER_SRC_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.ip4dst =
- cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
- fs->m_u.usr_ip4_spec.ip4dst =
- rule->unused_tuple & BIT(INNER_DST_IP) ?
- 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
-
- fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
- fs->m_u.usr_ip4_spec.tos =
- rule->unused_tuple & BIT(INNER_IP_TOS) ?
- 0 : rule->tuples_mask.ip_tos;
-
- fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip4_spec.proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
-
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
break;
case SCTP_V6_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
- fs->m_u.tcp_ip6_spec.psrc =
- rule->unused_tuple & BIT(INNER_SRC_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.src_port);
-
- fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
- fs->m_u.tcp_ip6_spec.pdst =
- rule->unused_tuple & BIT(INNER_DST_PORT) ?
- 0 : cpu_to_be16(rule->tuples_mask.dst_port);
-
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
break;
case IPV6_USER_FLOW:
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_SRC_IP))
- memset(fs->m_u.usr_ip6_spec.ip6src, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
-
- cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
- if (rule->unused_tuple & BIT(INNER_DST_IP))
- memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
- sizeof(int) * IPV6_SIZE);
- else
- cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
-
- fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
- fs->m_u.usr_ip6_spec.l4_proto =
- rule->unused_tuple & BIT(INNER_IP_PROTO) ?
- 0 : rule->tuples_mask.ip_proto;
-
- break;
- case ETHER_FLOW:
- ether_addr_copy(fs->h_u.ether_spec.h_source,
- rule->tuples.src_mac);
- if (rule->unused_tuple & BIT(INNER_SRC_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_source);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_source,
- rule->tuples_mask.src_mac);
-
- ether_addr_copy(fs->h_u.ether_spec.h_dest,
- rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
-
- fs->h_u.ether_spec.h_proto =
- cpu_to_be16(rule->tuples.ether_proto);
- fs->m_u.ether_spec.h_proto =
- rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
- 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
-
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
break;
+ /* The flow type of the fd rule has been checked before adding it to
+ * the rule list. As the other flow types have been handled above, the
+ * default case must be ETHER_FLOW.
+ */
default:
- spin_unlock_bh(&hdev->fd_rule_lock);
- return -EOPNOTSUPP;
- }
-
- if (fs->flow_type & FLOW_EXT) {
- fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
- fs->m_ext.vlan_tci =
- rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
- cpu_to_be16(VLAN_VID_MASK) :
- cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
}
- if (fs->flow_type & FLOW_MAC_EXT) {
- ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
- if (rule->unused_tuple & BIT(INNER_DST_MAC))
- eth_zero_addr(fs->m_u.ether_spec.h_dest);
- else
- ether_addr_copy(fs->m_u.ether_spec.h_dest,
- rule->tuples_mask.dst_mac);
- }
+ hclge_fd_get_ext_info(fs, rule);
if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
fs->ring_cookie = RX_CLS_FLOW_DISC;
@@ -6202,7 +6286,6 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
*/
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -EOPNOTSUPP;
}
@@ -6216,14 +6299,12 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOSPC;
}
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOMEM;
}
@@ -6310,6 +6391,14 @@ static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6486,8 +6575,6 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
- hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -6834,8 +6921,22 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int hclge_vport_start(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport->last_active_jiffies = jiffies;
+
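+ /* the bit is set when a reset has cleared this vport's hw table entries */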
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
return 0;
}
@@ -6872,17 +6973,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
}
if (op == HCLGE_MAC_VLAN_ADD) {
- if ((!resp_code) || (resp_code == 1)) {
+ if (!resp_code || resp_code == 1)
return 0;
- } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for uc_overflow.\n");
- return -ENOSPC;
- } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for mc_overflow.\n");
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
return -ENOSPC;
- }
dev_err(&hdev->pdev->dev,
"add mac addr failed for undefined, code=%u.\n",
@@ -7106,52 +7201,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
-static int hclge_init_umv_space(struct hclge_dev *hdev)
-{
- u16 allocated_size = 0;
- int ret;
-
- ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
- true);
- if (ret)
- return ret;
-
- if (allocated_size < hdev->wanted_umv_size)
- dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %u, get %u\n",
- hdev->wanted_umv_size, allocated_size);
-
- mutex_init(&hdev->umv_mutex);
- hdev->max_umv_size = allocated_size;
- /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
- * preserve some unicast mac vlan table entries shared by pf
- * and its vfs.
- */
- hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
- hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
-
- return 0;
-}
-
-static int hclge_uninit_umv_space(struct hclge_dev *hdev)
-{
- int ret;
-
- if (hdev->max_umv_size > 0) {
- ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
- false);
- if (ret)
- return ret;
- hdev->max_umv_size = 0;
- }
- mutex_destroy(&hdev->umv_mutex);
-
- return 0;
-}
-
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
- u16 *allocated_size, bool is_alloc)
+ u16 *allocated_size)
{
struct hclge_umv_spc_alc_cmd *req;
struct hclge_desc desc;
@@ -7159,21 +7210,39 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
req = (struct hclge_umv_spc_alc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
- if (!is_alloc)
- hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
req->space_size = cpu_to_le32(space_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "%s umv space failed for cmd_send, ret =%d\n",
- is_alloc ? "allocate" : "free", ret);
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
return ret;
}
- if (is_alloc && allocated_size)
- *allocated_size = le32_to_cpu(desc.data[1]);
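+ /* the command response carries the actually allocated size in data[1] */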
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ hdev->max_umv_size = allocated_size;
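+ /* each vport gets a private share; the remainder forms the shared pool */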
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
return 0;
}
@@ -7188,21 +7257,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
vport->used_umv_num = 0;
}
- mutex_lock(&hdev->umv_mutex);
+ mutex_lock(&hdev->vport_lock);
hdev->share_umv_size = hdev->priv_umv_size +
- hdev->max_umv_size % (hdev->num_req_vfs + 2);
- mutex_unlock(&hdev->umv_mutex);
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
}
-static bool hclge_is_umv_space_full(struct hclge_vport *vport)
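+/* pass need_lock as false only when the caller already holds hdev->vport_lock */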
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
struct hclge_dev *hdev = vport->back;
bool is_full;
- mutex_lock(&hdev->umv_mutex);
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
hdev->share_umv_size == 0);
- mutex_unlock(&hdev->umv_mutex);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
return is_full;
}
@@ -7211,7 +7284,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
struct hclge_dev *hdev = vport->back;
- mutex_lock(&hdev->umv_mutex);
if (is_free) {
if (vport->used_umv_num > hdev->priv_umv_size)
hdev->share_umv_size++;
@@ -7224,7 +7296,99 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
hdev->share_umv_size--;
vport->used_umv_num++;
}
- mutex_unlock(&hdev->umv_mutex);
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
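+ /* the periodic service task picks up this bit and applies the change */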
+ break;
+ /* only from tmp_add_list, where the mac_node->state cannot be ACTIVE */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ /* if the mac addr is already in the mac list, no need to add a new
+ * one into it; just check the mac addr state, and convert it to a
+ * new state, remove it, or do nothing.
+ */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+ /* if this address was never added, there is nothing to delete */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %pM from mac list\n",
+ addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
@@ -7232,7 +7396,8 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -7271,15 +7436,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
if (ret == -ENOENT) {
- if (!hclge_is_umv_space_full(vport)) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
if (!ret)
hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
+ mutex_unlock(&hdev->vport_lock);
- dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
- hdev->priv_umv_size);
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
return -ENOSPC;
}
@@ -7303,7 +7472,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_uc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
}
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -7326,8 +7496,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret)
+ if (!ret) {
+ mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ } else if (ret == -ENOENT) {
+ ret = 0;
+ }
return ret;
}
@@ -7337,7 +7512,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_add_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -7369,7 +7545,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- if (status == -ENOSPC)
+ /* if already overflow, not to print each time */
+ if (status == -ENOSPC &&
+ !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
@@ -7380,7 +7558,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
- return hclge_rm_mc_addr_common(vport, addr);
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
}
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -7415,111 +7594,354 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- } else {
- /* Maybe this mac address is in mta table, but it cannot be
- * deleted here because an entry of mta represents an address
- * range rather than a specific address. the delete action to
- * all entries will take effect in update_mta_status called by
- * hns3_nic_set_rx_mode.
- */
+ } else if (status == -ENOENT) {
status = 0;
}
return status;
}
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*sync)(struct hclge_vport *,
+ const unsigned char *))
{
- struct hclge_vport_mac_addr_cfg *mac_cfg;
- struct list_head *list;
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
- if (!vport->vport_id)
- return;
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
- if (!mac_cfg)
- return;
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*unsync)(struct hclge_vport *,
+ const unsigned char *))
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
- mac_cfg->hd_tbl_status = true;
- memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
- list_add_tail(&mac_cfg->node, list);
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, a TO_DEL request was received during the time
+ * window of adding the mac address into the mac table. if the
+ * mac_node state is ACTIVE, change it to TO_DEL so it will be
+ * removed next time; else it must be TO_ADD, meaning this
+ * address hasn't been added into the mac table yet, so just
+ * remove the mac node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+
+ return all_added;
}
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
- bool uc_flag, mc_flag;
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, a new TO_ADD
+ * request was received during the time window of
+ * configuring the mac address. The mac node state is
+ * TO_ADD and the address is still in the hardware (the
+ * delete failed), so just change the mac node state to
+ * ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
- uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
- mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
- if (uc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_addr);
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
- if (mc_flag && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_addr);
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
- list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ /* move the mac addrs to tmp_add_list and tmp_del_list, so
+ * we can add/delete them outside the spin lock
+ */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
break;
}
}
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_uc_addr_common);
+ } else {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_mc_addr_common);
+ }
+
+ /* if adding/deleting some mac addresses failed, move them back
+ * to the mac_list and retry next time.
+ */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
+
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type)
{
- struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
- struct list_head *list;
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_cfg, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+ int ret;
- list = (mac_type == HCLGE_MAC_ADDR_UC) ?
- &vport->uc_mac_list : &vport->mc_mac_list;
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
- list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
- hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+ INIT_LIST_HEAD(&tmp_del_list);
- if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
- hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
- mac_cfg->hd_tbl_status = false;
- if (is_del_list) {
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- kfree(mac_cfg);
+ list_add_tail(&mac_cfg->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ /* the mac addr was cleared from hardware, but is kept in
+ * the mac list so it can be restored after the vf reset
+ * finishes.
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
}
}
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ else
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "failed to completely uninit %s mac list for vport %u\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
}
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
- struct hclge_vport_mac_addr_cfg *mac, *tmp;
struct hclge_vport *vport;
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
-
- list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
- list_del(&mac->node);
- kfree(mac);
- }
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
}
}
@@ -7683,12 +8105,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, so the dev addr
+ * is not skipped due to the umv space limitation when entries
+ * are re-added after a global/imp reset, which clears the mac
+ * table in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
+
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
int ret;
/* mac addr check */
@@ -7696,39 +8163,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%pM.\n",
+ "failed to change uc mac, invalid mac: %pM\n",
new_addr);
return -EINVAL;
}
- if ((!is_first || is_kdump_kernel()) &&
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_warn(&hdev->pdev->dev,
- "remove old uc mac address fail.\n");
-
- ret = hclge_add_uc_addr(handle, new_addr);
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "add uc mac address fail, ret =%d.\n",
+ "failed to configure mac pause address, ret = %d\n",
ret);
-
- if (!is_first &&
- hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
- dev_err(&hdev->pdev->dev,
- "restore uc mac address fail.\n");
-
- return -EIO;
+ return ret;
}
- ret = hclge_pause_addr_cfg(hdev, new_addr);
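+ /* old_addr stays NULL on the first config, so no stale entry is scheduled for removal */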
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
dev_err(&hdev->pdev->dev,
- "configure mac pause address fail, ret =%d.\n",
- ret);
- return -EIO;
- }
+ "failed to change the mac addr: %pM, ret = %d\n",
+ new_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+
+ return ret;
+ }
+ /* the dev addr must be updated while holding the spin lock, to
+ * prevent it from being removed by the set_rx_mode path.
+ */
ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -8308,42 +8778,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
}
}
-static void hclge_restore_vlan_table(struct hnae3_handle *handle)
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
u16 vlan_proto;
- u16 state, vlan_id;
- int i;
+ u16 vlan_id;
+ u16 state;
+ int ret;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ state = vport->port_base_vlan_cfg.state;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- continue;
- }
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id,
+ false);
+ return;
+ }
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- int ret;
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+}
- if (!vlan->hd_tbl_status)
- continue;
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD, then they
+ * can be restored in the service task after the reset completes.
+ * Furthermore, the mac addresses with state TO_DEL or DEL_FAIL do not
+ * need to be restored after reset, so just remove these mac nodes from
+ * the mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
}
}
}
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+
+ hclge_restore_fd_entries(handle);
+}
+
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -9412,10 +9920,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
int ret;
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
- if (!hdev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!hdev)
+ return -ENOMEM;
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
@@ -9594,6 +10100,7 @@ err_pci_uninit:
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
+ mutex_destroy(&hdev->vport_lock);
return ret;
}
@@ -9658,7 +10165,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
dev_warn(&hdev->pdev->dev,
"vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
vf);
- else if (enable && hclge_is_umv_space_full(vport))
+ else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n",
vf);
@@ -9835,8 +10342,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev);
- memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
- memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ /* NOTE: a pf reset doesn't need to clear or restore the pf and vf
+ * table entries, so don't clean the tables in memory here.
+ */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
ret = hclge_cmd_init(hdev);
if (ret) {
@@ -9850,8 +10365,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- hclge_reset_umv_space(hdev);
-
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -9947,12 +10460,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_uninit_mac_table(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
- hclge_uninit_umv_space(hdev);
-
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
@@ -9966,7 +10478,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock);
- hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
ae_dev->priv = NULL;
}
@@ -10213,16 +10724,19 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- /*prepare 4 commands to query DFX BD number*/
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ int i;
- return hclge_cmd_send(&hdev->hw, desc, 4);
+ /* initialize the command BDs except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
@@ -10576,6 +11090,131 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return hclge_config_gro(hdev, enable);
}
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+ u8 tmp_flags = 0;
+ int ret;
+
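+ /* if the overflow promisc flags changed (e.g. after a MAC table
+ * overflow), record them and mark the promisc mode for re-sync
+ */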
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret) {
+ clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ hclge_enable_vlan_filter(handle,
+ tmp_flags & HNAE3_VLAN_FLTR);
+ }
+ }
+}
+
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* needs 6 BDs (140 bytes in total) for one read;
+ * returns the number of bytes actually read, 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* setup bd0, this bd contains offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
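+ /* each firmware read returns at most HCLGE_SFP_INFO_MAX_LEN bytes,
+ * so loop until the requested length has been read
+ */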
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -10588,6 +11227,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_vector = hclge_get_vector,
.put_vector = hclge_put_vector,
.set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
@@ -10649,7 +11289,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules,
- .restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
@@ -10662,13 +11301,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
- .restore_vlan_table = hclge_restore_vlan_table,
.get_vf_config = hclge_get_vf_config,
.set_vf_link_state = hclge_set_vf_link_state,
.set_vf_spoofchk = hclge_set_vf_spoofchk,
.set_vf_trust = hclge_set_vf_trust,
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 71df23d5f1b4..46e6e0fef3ba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX
};
@@ -580,7 +581,6 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg {
u8 fd_mode;
u16 max_key_length; /* use bit as unit */
- u32 proto_support;
u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
@@ -631,9 +631,15 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
-struct hclge_vport_mac_addr_cfg {
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
struct list_head node;
- int hd_tbl_status;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -765,12 +771,6 @@ struct hclge_dev {
u16 num_roce_msi; /* Num of roce vectors for this PF */
int roce_base_vector;
- u16 pending_udp_bitmap;
-
- u16 rx_itr_default;
- u16 tx_itr_default;
-
- u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list reset_timer;
@@ -806,6 +806,8 @@ struct hclge_dev {
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
@@ -823,7 +825,6 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
- struct mutex umv_mutex; /* protect share_umv_size */
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
@@ -867,6 +868,7 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -923,6 +925,10 @@ struct hclge_vport {
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protect the mac address list for add/delete */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
@@ -978,16 +984,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
- bool is_write_tbl,
- enum HCLGE_MAC_ADDR_TYPE mac_type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7f24fcb4f96a..0874ae47cb03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -5,6 +5,9 @@
#include "hclge_mbx.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
static u16 hclge_errno_to_resp(int errno)
{
return abs(errno);
@@ -90,6 +93,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
@@ -270,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (!is_valid_ether_addr(mac_addr))
return -EINVAL;
- hclge_rm_uc_addr_common(vport, old_addr);
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (status) {
- hclge_add_uc_addr_common(vport, old_addr);
- } else {
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
- }
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
- status = hclge_add_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
- status = hclge_rm_uc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_UC);
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
@@ -305,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
{
const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- int status;
if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
- status = hclge_add_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_add_vport_mac_table(vport, mac_addr,
- HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
- status = hclge_rm_mc_addr_common(vport, mac_addr);
- if (!status)
- hclge_rm_vport_mac_table(vport, mac_addr,
- false, HCLGE_MAC_ADDR_MC);
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
@@ -324,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
- return status;
+ return 0;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
@@ -638,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->reset_event(hdev->pdev, NULL);
}
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
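+ /* the VF requests the PF to clear all of its MAC and VLAN
+ * table entries
+ */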
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -645,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
+ bool is_del = false;
unsigned int flag;
int ret = 0;
@@ -674,6 +683,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
vport = &hdev->vport[req->mbx_src_vfid];
+ trace_hclge_pf_mbx_get(hdev, req);
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
@@ -731,7 +742,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret = hclge_get_link_info(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get link stat for VF\n",
+ "failed to inform link stat to VF, ret = %d\n",
ret);
break;
case HCLGE_MBX_QUEUE_RESET:
@@ -760,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
case HCLGE_MBX_VF_UNINIT:
- hclge_rm_vport_all_mac_table(vport, true,
+ is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, true,
+ hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, true);
+ hclge_rm_vport_all_vlan_table(vport, is_del);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
hclge_get_vf_media_type(vport, &resp_msg);
@@ -778,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
+ case HCLGE_MBX_HANDLE_VF_TBL:
+ hclge_handle_vf_tbl(vport, req);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 696c5ae922e3..e89820702540 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -155,7 +155,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
- "Failed to register MDIO bus ret = %#x\n", ret);
+ "failed to register MDIO bus, ret = %d\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 000000000000..5b0b71bd6120
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index 53804d95ea90..2c26ea607a53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index f38d236ebf4f..fec65239a3c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -11,9 +11,6 @@
#include "hclgevf_main.h"
#include "hnae3.h"
-#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
-#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index f830eef02e5c..40d6e602ab51 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -161,8 +161,8 @@ struct hclgevf_query_res_cmd {
#define HCLGEVF_GRO_EN_B 0
struct hclgevf_cfg_gro_status_cmd {
- __le16 gro_en;
- u8 rsv[22];
+ u8 gro_en;
+ u8 rsv[23];
};
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e02d427131ee..1b9578d0bd80 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -46,7 +46,7 @@ static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
HCLGEVF_CMDQ_RX_TAIL_REG,
HCLGEVF_CMDQ_RX_HEAD_REG,
HCLGEVF_VECTOR0_CMDQ_SRC_REG,
- HCLGEVF_CMDQ_INTR_STS_REG,
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG,
HCLGEVF_CMDQ_INTR_EN_REG,
HCLGEVF_CMDQ_INTR_GEN_REG};
@@ -669,8 +669,8 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
u16 tc_size[HCLGEVF_MAX_TC_NUM];
struct hclgevf_desc desc;
u16 roundup_size;
- int status;
unsigned int i;
+ int status;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
@@ -1143,7 +1143,6 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
send_msg.en_mc = en_mc_pmc ? 1 : 0;
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
@@ -1164,6 +1163,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc);
}
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+ bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+ int ret;
+
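+ /* only send the promisc mode to the PF when a change was requested */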
+ if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+ ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+ if (!ret)
+ clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ }
+}
+
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable)
{
@@ -1245,10 +1265,12 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
int status;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
- send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
- HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+ send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
ether_addr_copy(send_msg.data, new_mac_addr);
- ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
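+ /* on the first configuration with no PF-assigned MAC, pass an
+ * all-zero old address so the PF does not try to remove a
+ * previous entry
+ */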
+ if (is_first && !hdev->has_pf_mac)
+ eth_zero_addr(&send_msg.data[ETH_ALEN]);
+ else
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1256,54 +1278,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
return status;
}
-static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
- const unsigned char *addr)
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_NODE_STATE state)
+{
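+ /* a sketch of the state transitions implemented below:
+ * request TO_ADD: TO_DEL -> ACTIVE, others unchanged
+ * request TO_DEL: TO_ADD -> node freed, others -> TO_DEL
+ * request ACTIVE: TO_ADD -> ACTIVE, others unchanged
+ */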
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGEVF_MAC_TO_ADD:
+ if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGEVF_MAC_TO_DEL:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, where the mac_node->state won't be
+ * HCLGEVF_MAC_ACTIVE
+ */
+ case HCLGEVF_MAC_ACTIVE:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ }
+}
+
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+ enum HCLGEVF_MAC_NODE_STATE state,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
+ struct hclgevf_mac_addr_node *mac_node;
+ struct list_head *list;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* if the mac addr is already in the mac list, there is no need to
+ * add a new node; just check its state and either convert it to a
+ * new state, remove it, or do nothing.
+ */
+ mac_node = hclgevf_find_mac_node(list, addr);
+ if (mac_node) {
+ hclgevf_update_mac_node(mac_node, state);
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+ }
+ /* if this address was never added, there is nothing to delete */
+ if (state == HCLGEVF_MAC_TO_DEL) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOENT;
+ }
+
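+ /* GFP_ATOMIC: allocated while holding the BH spinlock */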
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOMEM;
+ }
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_UC, addr);
}
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclge_vf_to_pf_msg send_msg;
-
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_ADD);
- ether_addr_copy(send_msg.data, addr);
- return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_MC, addr);
}
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+ struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
struct hclge_vf_to_pf_msg send_msg;
+ u8 code, subcode;
+
+ if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+ code = HCLGE_MBX_SET_UNICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+ } else {
+ code = HCLGE_MBX_SET_MULTICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+ }
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_REMOVE);
- ether_addr_copy(send_msg.data, addr);
+ hclgevf_build_send_msg(&send_msg, code, subcode);
+ ether_addr_copy(send_msg.data, mac_node->mac_addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+ struct list_head *list,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+ int ret;
+
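+ /* stop at the first failure; the remaining nodes stay on the list
+ * and are retried on the next sync
+ */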
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac %pM, state = %d, ret = %d\n",
+ mac_node->mac_addr, mac_node->state, ret);
+ return;
+ }
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, a TO_DEL request was received during the
+ * time window of sending the mac config request to the PF.
+ * If the mac_node state is ACTIVE, change it to TO_DEL so it
+ * is removed on the next pass. If it is TO_ADD, the TO_ADD
+ * request failed, so just remove the mac node.
+ */
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclgevf_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr already exists in the mac list, a new
+ * TO_ADD request was received during the time window of
+ * sending the mac addr config request to the PF, so just
+ * change the mac state to ACTIVE.
+ */
+ new_node->state = HCLGEVF_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ /* move the mac addrs to the tmp_add_list and tmp_del_list, so
+ * we can add/delete these mac addrs outside the spin lock
+ */
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGEVF_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGEVF_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+ hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+ /* if adding/deleting some mac addresses failed, move them back
+ * to the mac_list and retry next time.
+ */
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_sync_from_del_list(&tmp_del_list, list);
+ hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+ hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
@@ -1506,10 +1776,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
- if (ret)
- return ret;
-
/* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false);
@@ -1559,7 +1825,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
@@ -1589,13 +1855,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
- /* Initialize ae_dev reset status as well, in case enet layer wants to
- * know if device is undergoing reset
- */
- ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++;
rtnl_lock();
@@ -1610,7 +1871,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
hdev->rst_stats.hw_rst_done_cnt++;
@@ -1625,7 +1885,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
@@ -1951,6 +2210,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
hclgevf_sync_vlan_filter(hdev);
+ hclgevf_sync_mac_table(hdev);
+
+ hclgevf_sync_promisc_mode(hdev);
+
hdev->last_serv_processed = jiffies;
out:
@@ -1986,7 +2249,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_STAT_REG);
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
@@ -2139,7 +2402,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
- req->gro_en = cpu_to_le16(en ? 1 : 0);
+ req->gro_en = en ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -2313,6 +2576,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
+
/* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}
@@ -2445,6 +2712,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2452,6 +2720,14 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
return ret;
set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
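+ /* a reset may have raced with the client init; if so, undo the
+ * registration and return -EBUSY so it can be retried
+ */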
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.rst_cnt) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ return -EBUSY;
+ }
+
hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->nic))
@@ -2695,6 +2971,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
return ret;
}
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
+ HCLGE_MBX_VPORT_LIST_CLEAR);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -2730,6 +3015,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2802,6 +3089,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ /* ensure the vf tbl list is empty before init */
+ ret = hclgevf_clear_vport_list(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to clear tbl list configuration, ret = %d.\n",
+ ret);
+ goto err_config;
+ }
+
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2846,6 +3142,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_uninit_mac_list(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -3213,6 +3510,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode,
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 3b88d866facc..c1fac8920ae3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -42,8 +42,6 @@
#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100
-#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104
#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
@@ -88,7 +86,7 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* Vector0 interrupt CMDQ event status register(RO) */
-#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
+#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -148,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
};
@@ -234,6 +233,29 @@ struct hclgevf_rst_stats {
u32 rst_fail_cnt; /* the number of VF reset fail */
};
+enum HCLGEVF_MAC_ADDR_TYPE {
+ HCLGEVF_MAC_ADDR_UC,
+ HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+ HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+ struct list_head node;
+ enum HCLGEVF_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+ spinlock_t mac_list_lock; /* protect the mac address list for add/delete */
+ struct list_head uc_mac_list;
+ struct list_head mc_mac_list;
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -256,7 +278,7 @@ struct hclgevf_dev {
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
- u16 num_tqps; /* num task queue pairs of this PF */
+ u16 num_tqps; /* num task queue pairs of this VF */
u16 alloc_rss_size; /* allocated RSS task queue */
u16 rss_size_max; /* HW defined max RSS task queue */
@@ -282,6 +304,8 @@ struct hclgevf_dev {
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hclgevf_mac_table_cfg mac_table;
+
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 9b8154955f91..5b2dcd97c107 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,9 @@
#include "hclgevf_main.h"
#include "hnae3.h"
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
static int hclgevf_resp_to_errno(u16 resp_code)
{
return resp_code ? -resp_code : 0;
@@ -106,6 +109,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+ trace_hclge_vf_mbx_send(hdev, req);
+
/* synchronous send */
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
@@ -179,6 +184,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
continue;
}
+ trace_hclge_vf_mbx_get(hdev, req);
+
/* synchronous messages are time critical and need preferential
* treatment. Therefore, we need to acknowledge all the sync
* responses as quickly as possible so that waiting tasks do not
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
new file mode 100644
index 000000000000..e4bfb6191fef
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGEVF_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_vf_mbx_get,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_vf_mbx_send,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGEVF_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclgevf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index fe88ab88cacc..32a011ca44c3 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o
+ hinic_common.o hinic_ethtool.o hinic_hw_mbox.o hinic_sriov.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index a209b14160cc..48b40be3e84d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -16,6 +16,7 @@
#include "hinic_hw_dev.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
+#include "hinic_sriov.h"
#define HINIC_DRV_NAME "hinic"
@@ -23,6 +24,7 @@ enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
HINIC_RSS_ENABLE = BIT(2),
+ HINIC_LINK_DOWN = BIT(3),
};
struct hinic_rx_mode_work {
@@ -67,6 +69,8 @@ struct hinic_dev {
struct hinic_txq *txqs;
struct hinic_rxq *rxqs;
+ u16 sq_depth;
+ u16 rq_depth;
struct hinic_txq_stats tx_stats;
struct hinic_rxq_stats rx_stats;
@@ -78,6 +82,7 @@ struct hinic_dev {
struct hinic_rss_type rss_type;
u8 *rss_hkey_user;
s32 *rss_indir_user;
+ struct hinic_sriov_info sriov_info;
};
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 966aea949c0b..efb02e03e7da 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -33,6 +33,99 @@
#include "hinic_rx.h"
#include "hinic_dev.h"
+#define SET_LINK_STR_MAX_LEN 128
+
+#define GET_SUPPORTED_MODE 0
+#define GET_ADVERTISED_MODE 1
+
+#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= \
+ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
+#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->supported |= SUPPORTED_##mode)
+#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \
+ ((ecmd)->advertising |= ADVERTISED_##mode)
+
+struct hw2ethtool_link_mode {
+ enum ethtool_link_mode_bit_indices link_mode_bit;
+ u32 speed;
+ enum hinic_link_mode hw_link_mode;
+};
+
+struct cmd_link_settings {
+ u64 supported;
+ u64 advertising;
+
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ u8 autoneg;
+};
+
+static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
+ SPEED_10, SPEED_100,
+ SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000,
+ SPEED_100000
+};
+
+static struct hw2ethtool_link_mode
+ hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
+ .hw_link_mode = HINIC_10GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
+ .hw_link_mode = HINIC_40GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_KR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ .hw_link_mode = HINIC_100GE_BASE_CR4,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR_S,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_KR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
+ .hw_link_mode = HINIC_25GE_BASE_CR,
+ },
+ {
+ .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
+ .hw_link_mode = HINIC_GE_BASE_KX,
+ },
+};
+
static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
enum hinic_speed speed)
{
@@ -71,18 +164,91 @@ static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
}
}
+static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
+{
+ int i = 0;
+
+ for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
+ if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
+ break;
+ }
+
+ return i;
+}
+
+static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
+ enum hinic_link_mode hw_link_mode,
+ u32 name)
+{
+ enum hinic_link_mode link_mode;
+ int idx = 0;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (hw_link_mode & ((u32)1 << link_mode)) {
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (name == GET_SUPPORTED_MODE)
+ ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
+ (link_settings, idx);
+ else
+ ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
+ (link_settings, idx);
+ }
+ }
+}
+
+static void hinic_link_port_type(struct cmd_link_settings *link_settings,
+ enum hinic_port_type port_type)
+{
+ switch (port_type) {
+ case HINIC_PORT_ELEC:
+ case HINIC_PORT_TP:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
+ link_settings->port = PORT_TP;
+ break;
+
+ case HINIC_PORT_AOC:
+ case HINIC_PORT_FIBRE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_FIBRE;
+ break;
+
+ case HINIC_PORT_COPPER:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
+ link_settings->port = PORT_DA;
+ break;
+
+ case HINIC_PORT_BACKPLANE:
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
+ link_settings->port = PORT_NONE;
+ break;
+
+ default:
+ link_settings->port = PORT_OTHER;
+ break;
+ }
+}
+
static int hinic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings
*link_ksettings)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct hinic_pause_config pause_info = { 0 };
+ struct cmd_link_settings settings = { 0 };
enum hinic_port_link_state link_state;
struct hinic_port_cap port_cap;
int err;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
- Autoneg);
link_ksettings->base.speed = SPEED_UNKNOWN;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
@@ -92,14 +258,19 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (err)
return err;
+ hinic_link_port_type(&settings, port_cap.port_type);
+ link_ksettings->base.port = settings.port;
+
err = hinic_port_link_state(nic_dev, &link_state);
if (err)
return err;
- if (link_state != HINIC_LINK_STATE_UP)
- return err;
-
- set_link_speed(link_ksettings, port_cap.speed);
+ if (link_state == HINIC_LINK_STATE_UP) {
+ set_link_speed(link_ksettings, port_cap.speed);
+ link_ksettings->base.duplex =
+ (port_cap.duplex == HINIC_DUPLEX_FULL) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
ethtool_link_ksettings_add_link_mode(link_ksettings,
@@ -108,11 +279,243 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
link_ksettings->base.autoneg = AUTONEG_ENABLE;
- link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ?
- DUPLEX_FULL : DUPLEX_HALF;
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return -EIO;
+
+ hinic_add_ethtool_link_mode(&settings, link_mode.supported,
+ GET_SUPPORTED_MODE);
+ hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
+ GET_ADVERTISED_MODE);
+
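+ /* pause parameters can only be queried on the PF; VFs skip this */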
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+ if (err)
+ return err;
+ ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
+ if (pause_info.rx_pause && pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ } else if (pause_info.tx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ } else if (pause_info.rx_pause) {
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
+ ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
+ }
+ }
+
+ bitmap_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
return 0;
}
+static int hinic_ethtool_to_hw_speed_level(u32 speed)
+{
+ int i;
+
+ for (i = 0; i < LINK_SPEED_LEVELS; i++) {
+ if (hw_to_ethtool_speed[i] == speed)
+ break;
+ }
+
+ return i;
+}
+
+static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
+ u32 speed)
+{
+ enum hinic_link_mode link_mode;
+ int idx;
+
+ for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
+ if (!(supported_link & ((u32)1 << link_mode)))
+ continue;
+
+ idx = hinic_get_link_mode_index(link_mode);
+ if (idx >= HINIC_LINK_MODE_NUMBERS)
+ continue;
+
+ if (hw_to_ethtool_link_mode_table[idx].speed == speed)
+ return true;
+ }
+
+ return false;
+}
+
+static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
+{
+ struct hinic_link_mode_cmd link_mode = { 0 };
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
+ if (err)
+ return false;
+
+ if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
+ link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
+ return false;
+
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ if (speed_level >= LINK_SPEED_LEVELS ||
+ !hinic_is_support_speed(link_mode.supported, speed)) {
+ netif_err(nic_dev, drv, netdev,
+ "Unsupported speed: %d\n", speed);
+ return false;
+ }
+
+ return true;
+}
+
+static int get_link_settings_type(struct hinic_dev *nic_dev,
+ u8 autoneg, u32 speed, u32 *set_settings)
+{
+ struct hinic_port_cap port_cap = { 0 };
+ int err;
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err)
+ return err;
+
+ /* always set autonegotiation */
+ if (port_cap.autoneg_cap)
+ *set_settings |= HILINK_LINK_SET_AUTONEG;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (!port_cap.autoneg_cap) {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (speed != (u32)SPEED_UNKNOWN) {
+ /* set speed only when autoneg is disabled */
+ if (!hinic_is_speed_legal(nic_dev, speed))
+ return -EINVAL;
+ *set_settings |= HILINK_LINK_SET_SPEED;
+ } else {
+ netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg,
+ u32 speed)
+{
+ enum nic_speed_level speed_level = 0;
+ int err = 0;
+
+ if (set_settings & HILINK_LINK_SET_AUTONEG) {
+ err = hinic_set_autoneg(nic_dev->hwdev,
+ (autoneg == AUTONEG_ENABLE));
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
+ (autoneg == AUTONEG_ENABLE) ?
+ "Enable" : "Disable");
+ }
+
+ if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = hinic_set_speed(nic_dev->hwdev, speed_level);
+ if (err)
+ netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
+ speed);
+ else
+ netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
+ speed);
+ }
+
+ return err;
+}
+
+static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
+ u32 set_settings, u8 autoneg, u32 speed)
+{
+ struct hinic_link_ksettings_info settings = {0};
+ char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
+ struct net_device *netdev = nic_dev->netdev;
+ enum nic_speed_level speed_level = 0;
+ int err;
+
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
+ (set_settings & HILINK_LINK_SET_AUTONEG) ?
+ (autoneg ? "autong enable " : "autong disable ") : "");
+ if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+
+ if (set_settings & HILINK_LINK_SET_SPEED) {
+ speed_level = hinic_ethtool_to_hw_speed_level(speed);
+ err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
+ "%sspeed %d ", set_link_str, speed);
+ if (err <= 0 || err >= SET_LINK_STR_MAX_LEN) {
+ netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
+ err, SET_LINK_STR_MAX_LEN);
+ return -EFAULT;
+ }
+ }
+
+ settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ settings.valid_bitmap = set_settings;
+ settings.autoneg = autoneg;
+ settings.speed = speed_level;
+
+ err = hinic_set_link_settings(nic_dev->hwdev, &settings);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
+ if (err)
+ netif_err(nic_dev, drv, netdev, "Set %s failed\n",
+ set_link_str);
+ else
+ netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
+ set_link_str);
+
+ return err;
+ }
+
+ return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
+ speed);
+}
+
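The summary string in hinic_set_settings_to_hw() is built by appending formatted pieces into one fixed buffer. A standalone sketch of that append pattern follows; the buffer size and strings are examples, and the explicit offset keeps the destination buffer from ever doubling as a source argument:

/* Sketch only, not part of the patch: safe incremental snprintf(). */
#include <stdio.h>

#define EX_BUF_LEN 64

int main(void)
{
	char buf[EX_BUF_LEN] = { 0 };
	int len, n;

	len = snprintf(buf, EX_BUF_LEN, "%s", "autoneg disable ");
	if (len < 0 || len >= EX_BUF_LEN)
		return 1;	/* error or truncated */

	n = snprintf(buf + len, EX_BUF_LEN - len, "speed %d ", 10000);
	if (n < 0 || n >= EX_BUF_LEN - len)
		return 1;

	printf("%s\n", buf);	/* "autoneg disable speed 10000 " */
	return 0;
}
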
+static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u32 set_settings = 0;
+ int err;
+
+ err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
+ if (err)
+ return err;
+
+ if (set_settings)
+ err = hinic_set_settings_to_hw(nic_dev, set_settings,
+ autoneg, speed);
+ else
+ netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");
+
+ return err;
+}
+
+static int hinic_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ /* only autoneg and speed can be set */
+ return set_link_settings(netdev, link_settings->base.autoneg,
+ link_settings->base.speed);
+}
+
static void hinic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -135,26 +538,118 @@ static void hinic_get_drvinfo(struct net_device *netdev,
static void hinic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- ring->rx_max_pending = HINIC_RQ_DEPTH;
- ring->tx_max_pending = HINIC_SQ_DEPTH;
- ring->rx_pending = HINIC_RQ_DEPTH;
- ring->tx_pending = HINIC_SQ_DEPTH;
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
+ ring->rx_pending = nic_dev->rq_depth;
+ ring->tx_pending = nic_dev->sq_depth;
+}
+
+static int check_ringparam_valid(struct hinic_dev *nic_dev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Unsupported rx_jumbo_pending/rx_mini_pending\n");
+ return -EINVAL;
+ }
+
+ if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
+ ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
+ ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Queue depth out of range [%d-%d]\n",
+ HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+
+ return 0;
}
+static int hinic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 new_sq_depth, new_rq_depth;
+ int err;
+
+ err = check_ringparam_valid(nic_dev, ring);
+ if (err)
+ return err;
+
+ new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
+ new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));
+
+ if (new_sq_depth == nic_dev->sq_depth &&
+ new_rq_depth == nic_dev->rq_depth)
+ return 0;
+
+ netif_info(nic_dev, drv, netdev,
+ "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
+ nic_dev->sq_depth, nic_dev->rq_depth,
+ new_sq_depth, new_rq_depth);
+
+ nic_dev->sq_depth = new_sq_depth;
+ nic_dev->rq_depth = new_rq_depth;
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ err = hinic_close(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to close netdev\n");
+ return -EFAULT;
+ }
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
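hinic_set_ringparam() above normalizes the requested depth with 1U << ilog2(x), rounding it down to the nearest power of two. A standalone sketch of the same computation without the kernel's ilog2() helper (range checking against the driver's min/max limits is assumed to have happened already):

/* Sketch only, not part of the patch: round down to a power of two. */
#include <stdint.h>
#include <stdio.h>

static uint16_t ex_depth_round_pow2(uint32_t requested)
{
	uint32_t depth = 1;

	while ((depth << 1) <= requested)
		depth <<= 1;

	return (uint16_t)depth;
}

int main(void)
{
	printf("%u\n", ex_depth_round_pow2(3000));	/* prints 2048 */
	return 0;
}
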
static void hinic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- channels->max_rx = hwdev->nic_cap.max_qps;
- channels->max_tx = hwdev->nic_cap.max_qps;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = hinic_hwdev_num_qps(hwdev);
- channels->tx_count = hinic_hwdev_num_qps(hwdev);
- channels->other_count = 0;
- channels->combined_count = 0;
+ channels->max_combined = nic_dev->max_qps;
+ channels->combined_count = hinic_hwdev_num_qps(hwdev);
+}
+
+static int hinic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int count = channels->combined_count;
+ int err;
+
+ netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
+ hinic_hwdev_num_qps(nic_dev->hwdev), count);
+
+ if (netif_running(netdev)) {
+ netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
+ hinic_close(netdev);
+
+ nic_dev->hwdev->nic_cap.num_qps = count;
+
+ err = hinic_open(netdev);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to open netdev\n");
+ return -EFAULT;
+ }
+ } else {
+ nic_dev->hwdev->nic_cap.num_qps = count;
+ }
+
+ return 0;
}
static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
@@ -741,10 +1236,13 @@ static void hinic_get_strings(struct net_device *netdev,
static const struct ethtool_ops hinic_ethtool_ops = {
.get_link_ksettings = hinic_get_link_ksettings,
+ .set_link_ksettings = hinic_set_link_ksettings,
.get_drvinfo = hinic_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = hinic_get_ringparam,
+ .set_ringparam = hinic_set_ringparam,
.get_channels = hinic_get_channels,
+ .set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
.set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 5f2d57d1b2d3..cb5b6e5f787f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -64,7 +64,7 @@
#define CMDQ_WQE_SIZE 64
#define CMDQ_DEPTH SZ_4K
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
+#define CMDQ_WQ_PAGE_SIZE SZ_256K
#define WQE_LCMD_SIZE 64
#define WQE_SCMD_SIZE 64
@@ -705,7 +705,7 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
/* The data in the HW is in Big Endian Format */
wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
ctxt_info->curr_wqe_page_pfn =
HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
@@ -714,16 +714,19 @@ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
+ if (wq->num_q_pages != 1) {
+ /* block PFN - Read Modify Write */
+ cmdq_first_block_paddr = cmdq_pages->page_paddr;
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
+ }
ctxt_info->wq_block_pfn =
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
+ cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif);
cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
}
@@ -795,11 +798,6 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
size_t cmdq_ctxts_size;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
if (!cmdq_ctxts)
@@ -851,6 +849,25 @@ err_init_cmdq:
return err;
}
+static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 };
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+
+ hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE;
+ hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_HWCTXT_SET,
+ &hw_ioctxt, sizeof(hw_ioctxt), NULL,
+ NULL, HINIC_MGMT_MSG_SYNC);
+}
+
/**
* hinic_init_cmdqs - init all cmdqs
* @cmdqs: cmdqs to init
@@ -901,8 +918,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
cmdq_ceq_handler);
+
+ err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n");
+ goto err_set_cmdq_depth;
+ }
+
return 0;
+err_set_cmdq_depth:
+ hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
+
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 7a434b653faa..3e4b0aef9fe6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -122,7 +122,7 @@ struct hinic_cmdq_ctxt {
u16 func_idx;
u8 cmdq_type;
- u8 rsvd1[1];
+ u8 ppf_idx;
u8 rsvd2[4];
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index cdec1d0a3962..7e84e4e33fff 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -10,7 +10,7 @@
/* HW interface registers */
#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
+#define HINIC_CSR_FUNC_ATTR2_ADDR 0x8
#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index c7c75b772a86..0245da02efbb 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -15,7 +15,9 @@
#include <linux/jiffies.h>
#include <linux/log2.h>
#include <linux/err.h>
+#include <linux/netdevice.h>
+#include "hinic_sriov.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_mgmt.h"
@@ -42,24 +44,6 @@ enum io_status {
IO_RUNNING = 1,
};
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
/**
* get_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -67,16 +51,13 @@ struct hinic_dev_cap {
*
* Return 0 - Success, negative - Failure
**/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
+static int parse_capability(struct hinic_hwdev *hwdev,
+ struct hinic_dev_cap *dev_cap)
{
struct hinic_cap *nic_cap = &hwdev->nic_cap;
int num_aeqs, num_ceqs, num_irqs;
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
+ if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE)
return -EFAULT;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
@@ -89,13 +70,19 @@ static int get_capability(struct hinic_hwdev *hwdev,
if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
nic_cap->num_qps = HINIC_Q_CTXT_MAX;
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
+ if (!HINIC_IS_VF(hwdev->hwif))
+ nic_cap->max_qps = dev_cap->max_sqs + 1;
+ else
+ nic_cap->max_qps = dev_cap->max_sqs;
if (nic_cap->num_qps > nic_cap->max_qps)
nic_cap->num_qps = nic_cap->max_qps;
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ nic_cap->max_vf = dev_cap->max_vf;
+ nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
+ }
+
return 0;
}
@@ -105,27 +92,26 @@ static int get_capability(struct hinic_hwdev *hwdev,
*
* Return 0 - Success, negative - Failure
**/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
+static int get_capability(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
+ u16 out_len;
int err;
- in_len = 0;
out_len = sizeof(dev_cap);
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
+ HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap),
+ &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC);
if (err) {
dev_err(&pdev->dev, "Failed to get capability from FW\n");
return err;
}
- return get_capability(hwdev, &dev_cap);
+ return parse_capability(hwdev, &dev_cap);
}
/**
@@ -144,15 +130,14 @@ static int get_dev_cap(struct hinic_hwdev *hwdev)
switch (HINIC_FUNC_TYPE(hwif)) {
case HINIC_PPF:
case HINIC_PF:
+ case HINIC_VF:
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
+ err = get_capability(pfhwdev);
if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
+ dev_err(&pdev->dev, "Failed to get capability\n");
return err;
}
break;
-
default:
dev_err(&pdev->dev, "Unsupported PCI Function type\n");
return -EINVAL;
@@ -225,15 +210,8 @@ static void disable_msix(struct hinic_hwdev *hwdev)
int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
@@ -241,6 +219,19 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
HINIC_MGMT_MSG_SYNC);
}
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size)
+{
+ struct hinic_pfhwdev *pfhwdev;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd,
+ buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+}
+
/**
* init_fw_ctxt- Init Firmware tables before network mgmt and io operations
* @hwdev: the NIC HW device
@@ -252,14 +243,9 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
+ u16 out_size = sizeof(fw_ctxt);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
@@ -283,19 +269,13 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
*
* Return 0 - Success, negative - Failure
**/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
+static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,
+ unsigned int rq_depth)
{
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
@@ -374,11 +354,6 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
struct hinic_pfhwdev *pfhwdev;
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
/* sleep 100ms to wait for firmware stopping I/O */
msleep(100);
@@ -410,14 +385,8 @@ static int set_resources_state(struct hinic_hwdev *hwdev,
{
struct hinic_cmd_set_res_state res_state;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
res_state.state = state;
@@ -441,8 +410,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
{
struct hinic_cmd_base_qpn cmd_base_qpn;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(cmd_base_qpn);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -466,7 +435,7 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
*
* Return 0 - Success, negative - Failure
**/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
{
struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
struct hinic_cap *nic_cap = &hwdev->nic_cap;
@@ -488,6 +457,9 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
+ func_to_io->hwdev = hwdev;
+ func_to_io->sq_depth = sq_depth;
+ func_to_io->rq_depth = rq_depth;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
@@ -513,7 +485,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
hinic_db_state_set(hwif, HINIC_DB_ENABLE);
}
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
+ err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
if (err) {
dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
goto err_hw_ioctxt;
@@ -558,17 +530,10 @@ void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
u16 in_size, void *buf_out,
u16 *out_size))
{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
@@ -588,15 +553,12 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd)
{
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_nic_cb *nic_cb;
u8 cmd_cb;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
+ if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif))
return;
- }
pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
@@ -676,10 +638,23 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
+ err = hinic_func_to_func_init(hwdev);
+ if (err) {
+ dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
+ hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
+ return err;
+ }
+
+ if (!HINIC_IS_VF(hwif))
+ hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC, pfhwdev,
+ nic_mgmt_msg_handler);
+ else
+ hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_mgmt_msg_handler);
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
+
return 0;
}
@@ -693,11 +668,43 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt,
+ HINIC_MOD_L2NIC);
+ else
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+
+ hinic_func_to_func_free(hwdev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
+static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+ struct hinic_cmd_l2nic_reset l2nic_reset = {0};
+ u16 out_size = sizeof(l2nic_reset);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ /* 0 represents standard l2nic reset flow */
+ l2nic_reset.reset_flag = 0;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset,
+ sizeof(l2nic_reset), &l2nic_reset,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || l2nic_reset.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, l2nic_reset.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
@@ -723,12 +730,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
return ERR_PTR(err);
}
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
if (!pfhwdev) {
err = -ENOMEM;
@@ -766,12 +767,22 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
goto err_init_pfhwdev;
}
+ err = hinic_l2nic_reset(hwdev);
+ if (err)
+ goto err_l2nic_reset;
+
err = get_dev_cap(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to get device capabilities\n");
goto err_dev_cap;
}
+ err = hinic_vf_func_init(hwdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init nic mbox\n");
+ goto err_vf_func_init;
+ }
+
err = init_fw_ctxt(hwdev);
if (err) {
dev_err(&pdev->dev, "Failed to init function table\n");
@@ -788,6 +799,9 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
err_resources_state:
err_init_fw_ctxt:
+ hinic_vf_func_free(hwdev);
+err_vf_func_init:
+err_l2nic_reset:
err_dev_cap:
free_pfhwdev(pfhwdev);
@@ -799,7 +813,6 @@ err_aeqs_init:
err_init_msix:
err_pfhwdev_alloc:
-err_func_type:
hinic_free_hwif(hwif);
return ERR_PTR(err);
}
@@ -930,15 +943,9 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
{
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
struct hinic_cmd_hw_ci hw_ci;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
hw_ci.dma_attr_off = 0;
hw_ci.pending_limit = pending_limit;
hw_ci.coalesc_timer = coalesc_timer;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 66fd2340d447..71ea7e46dbbc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -16,18 +16,33 @@
#include "hinic_hw_mgmt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"
+#include "hinic_hw_mbox.h"
#define HINIC_MAX_QPS 32
#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
HINIC_MGMT_MSG_CMD_BASE)
+#define HINIC_PF_SET_VF_ALREADY 0x4
+#define HINIC_MGMT_STATUS_EXIST 0x6
+#define HINIC_MGMT_CMD_UNSUPPORTED 0xFF
+
struct hinic_cap {
u16 max_qps;
u16 num_qps;
+ u8 max_vf;
+ u16 max_vf_qps;
+};
+
+enum hw_ioctxt_set_cmdq_depth {
+ HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
+ HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE,
};
enum hinic_port_cmd {
+ HINIC_PORT_CMD_VF_REGISTER = 0x0,
+ HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
+
HINIC_PORT_CMD_CHANGE_MTU = 2,
HINIC_PORT_CMD_ADD_VLAN = 3,
@@ -39,6 +54,9 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+
HINIC_PORT_CMD_GET_LINK_STATE = 24,
HINIC_PORT_CMD_SET_LRO = 25,
@@ -77,19 +95,45 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+
HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
HINIC_PORT_CMD_SET_FUNC_STATE = 93,
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_VF_RATE = 105,
+
+ HINIC_PORT_CMD_SET_VF_VLAN = 106,
+
+ HINIC_PORT_CMD_CLR_VF_VLAN,
+
HINIC_PORT_CMD_SET_TSO = 112,
HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 164,
+
HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_GET_LINK_MODE = 217,
+
+ HINIC_PORT_CMD_SET_SPEED = 218,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 219,
+
HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+};
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
};
enum hinic_ucode_cmd {
@@ -191,6 +235,17 @@ struct hinic_cmd_set_res_state {
u32 rsvd2;
};
+struct hinic_ceq_ctrl_reg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+};
+
struct hinic_cmd_base_qpn {
u8 status;
u8 version;
@@ -219,12 +274,22 @@ struct hinic_cmd_hw_ci {
u64 ci_addr;
};
+struct hinic_cmd_l2nic_reset {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 reset_flag;
+};
+
struct hinic_hwdev {
struct hinic_hwif *hwif;
struct msix_entry *msix_entries;
struct hinic_aeqs aeqs;
struct hinic_func_to_io func_to_io;
+ struct hinic_mbox_func_to_func *func_to_func;
struct hinic_cap nic_cap;
};
@@ -246,6 +311,25 @@ struct hinic_pfhwdev {
struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
};
+struct hinic_dev_cap {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 rsvd1[5];
+ u8 intr_type;
+ u8 max_cos_id;
+ u8 er_id;
+ u8 port_id;
+ u8 max_vf;
+ u8 rsvd2[62];
+ u16 max_sqs;
+ u16 max_rqs;
+ u16 max_vf_sqs;
+ u16 max_vf_rqs;
+ u8 rsvd3[204];
+};
+
void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
enum hinic_mgmt_msg_cmd cmd, void *handle,
void (*handler)(void *handle, void *buf_in,
@@ -259,7 +343,11 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
void *buf_in, u16 in_size, void *buf_out,
u16 *out_size);
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
+int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index c0b6bcb067cd..397936cac304 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -17,6 +17,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
@@ -416,11 +417,11 @@ static irqreturn_t ceq_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void set_ctrl0(struct hinic_eq *eq)
+static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
{
struct msix_entry *msix_entry = &eq->msix_entry;
enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
+ u32 val, ctrl0;
if (type == HINIC_AEQ) {
/* RMW Ctrl0 */
@@ -440,9 +441,7 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl0 */
addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
@@ -462,16 +461,28 @@ static void set_ctrl0(struct hinic_eq *eq)
HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
}
+ return val;
}
-static void set_ctrl1(struct hinic_eq *eq)
+static void set_ctrl0(struct hinic_eq *eq)
{
+ u32 val, addr;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+
+ val = get_ctrl0_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
+{
+ u32 page_size_val, elem_size, val, ctrl1;
enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
if (type == HINIC_AEQ) {
/* RMW Ctrl1 */
@@ -491,9 +502,7 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
+ } else {
/* RMW Ctrl1 */
addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
@@ -508,19 +517,70 @@ static void set_ctrl1(struct hinic_eq *eq)
HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
val |= ctrl1;
+ }
+ return val;
+}
- hinic_hwif_write_reg(eq->hwif, addr, val);
+static void set_ctrl1(struct hinic_eq *eq)
+{
+ u32 addr, val;
+
+ if (eq->type == HINIC_AEQ)
+ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+ else
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+
+ val = get_ctrl1_val(eq, addr);
+
+ hinic_hwif_write_reg(eq->hwif, addr, val);
+}
+
+static int set_ceq_ctrl_reg(struct hinic_eq *eq)
+{
+ struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
+ struct hinic_hwdev *hwdev = eq->hwdev;
+ u16 out_size = sizeof(ceq_ctrl);
+ u16 in_size = sizeof(ceq_ctrl);
+ struct hinic_pfhwdev *pfhwdev;
+ u32 addr;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
+ ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
+ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
+ ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);
+
+ ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ ceq_ctrl.q_id = eq->q_id;
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
+ &ceq_ctrl, in_size,
+ &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || ceq_ctrl.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
+ eq->q_id, err, ceq_ctrl.status, out_size);
+ return -EFAULT;
}
+
+ return 0;
}
/**
* set_eq_ctrls - setting eq's ctrl registers
* @eq: the Event Queue for setting
**/
-static void set_eq_ctrls(struct hinic_eq *eq)
+static int set_eq_ctrls(struct hinic_eq *eq)
{
+ if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
+ return set_ceq_ctrl_reg(eq);
+
set_ctrl0(eq);
set_ctrl1(eq);
+ return 0;
}
/**
@@ -703,7 +763,12 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
return -EINVAL;
}
- set_eq_ctrls(eq);
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set eq ctrls\n");
+ return err;
+ }
+
eq_update_ci(eq, EQ_ARMED);
err = alloc_eq_pages(eq);
@@ -859,6 +924,7 @@ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
ceqs->num_ceqs = num_ceqs;
for (q_id = 0; q_id < num_ceqs; q_id++) {
+ ceqs->ceq[q_id].hwdev = ceqs->hwdev;
err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
page_size, msix_entries[q_id]);
if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index d35f2068ee0c..74b9ff90640c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -143,8 +143,9 @@ enum hinic_eq_type {
};
enum hinic_aeq_type {
+ HINIC_MBX_FROM_FUNC = 1,
HINIC_MSG_FROM_MGMT_CPU = 2,
-
+ HINIC_MBX_SEND_RSLT = 5,
HINIC_MAX_AEQ_EVENTS,
};
@@ -171,7 +172,7 @@ struct hinic_eq_work {
struct hinic_eq {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
enum hinic_eq_type type;
int q_id;
u32 q_len;
@@ -219,7 +220,7 @@ struct hinic_ceq_cb {
struct hinic_ceqs {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_eq ceq[HINIC_MAX_CEQS];
int num_ceqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 07bbfbf68577..cf127d896ba6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
@@ -18,6 +19,8 @@
#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
+#define WAIT_HWIF_READY_TIMEOUT 10000
+
/**
* hinic_msix_attr_set - set message attribute for msix entry
* @hwif: the HW interface of a pci function device
@@ -115,8 +118,12 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
+ u32 attr5;
+
+ if (HINIC_IS_VF(hwif))
+ return;
+ attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
attr5 |= HINIC_FA5_SET(action, PF_ACTION);
@@ -183,27 +190,47 @@ void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
**/
static int hwif_ready(struct hinic_hwif *hwif)
{
- struct pci_dev *pdev = hwif->pdev;
u32 addr, attr1;
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
+ if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS))
+ return -EBUSY;
+
+ if (HINIC_IS_VF(hwif)) {
+ if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS))
+ return -EBUSY;
}
return 0;
}
+static int wait_hwif_ready(struct hinic_hwif *hwif)
+{
+ unsigned long timeout = 0;
+
+ do {
+ if (!hwif_ready(hwif))
+ return 0;
+
+ usleep_range(999, 1000);
+ timeout++;
+ } while (timeout <= WAIT_HWIF_READY_TIMEOUT);
+
+ dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n");
+
+ return -EBUSY;
+}
+
/**
* set_hwif_attr - set the attributes in the relevant members in hwif
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
**/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
+static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
+ u32 attr2)
{
hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
@@ -214,6 +241,8 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
+ hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2,
+ GLOBAL_VF_ID_OF_PF);
}
/**
@@ -222,7 +251,7 @@ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
**/
static void read_hwif_attr(struct hinic_hwif *hwif)
{
- u32 addr, attr0, attr1;
+ u32 addr, attr0, attr1, attr2;
addr = HINIC_CSR_FUNC_ATTR0_ADDR;
attr0 = hinic_hwif_read_reg(hwif, addr);
@@ -230,7 +259,10 @@ static void read_hwif_attr(struct hinic_hwif *hwif)
addr = HINIC_CSR_FUNC_ATTR1_ADDR;
attr1 = hinic_hwif_read_reg(hwif, addr);
- set_hwif_attr(hwif, attr0, attr1);
+ addr = HINIC_CSR_FUNC_ATTR2_ADDR;
+ attr2 = hinic_hwif_read_reg(hwif, addr);
+
+ set_hwif_attr(hwif, attr0, attr1, attr2);
}
/**
@@ -309,6 +341,34 @@ static void dma_attr_init(struct hinic_hwif *hwif)
HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
}
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif)
+{
+ if (!hwif)
+ return 0;
+
+ return hwif->attr.global_vf_id_of_pf;
+}
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, FUNC_IDX);
+}
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif)
+{
+ u32 addr, attr0;
+
+ addr = HINIC_CSR_FUNC_ATTR0_ADDR;
+ attr0 = hinic_hwif_read_reg(hwif, addr);
+
+ return HINIC_FA0_GET(attr0, PF_IDX);
+}
+
/**
* hinic_init_hwif - initialize the hw interface
* @hwif: the HW interface of a pci function device
@@ -335,7 +395,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
goto err_map_intr_bar;
}
- err = hwif_ready(hwif);
+ err = wait_hwif_ready(hwif);
if (err) {
dev_err(&pdev->dev, "HW interface is not ready\n");
goto err_hwif_ready;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c7bb9ceca72c..0872e035faa1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -35,6 +35,7 @@
#define HINIC_FA0_FUNC_IDX_SHIFT 0
#define HINIC_FA0_PF_IDX_SHIFT 10
#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
+#define HINIC_FA0_VF_IN_PF_SHIFT 16
/* reserved members - off 16 */
#define HINIC_FA0_FUNC_TYPE_SHIFT 24
@@ -42,6 +43,7 @@
#define HINIC_FA0_PF_IDX_MASK 0xF
#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
#define HINIC_FA0_FUNC_TYPE_MASK 0x1
+#define HINIC_FA0_VF_IN_PF_MASK 0xFF
#define HINIC_FA0_GET(val, member) \
(((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
@@ -53,17 +55,25 @@
#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_MGMT_INIT_STATUS_SHIFT 30
+#define HINIC_FA1_PF_INIT_STATUS_SHIFT 31
#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_MGMT_INIT_STATUS_MASK 0x1
+#define HINIC_FA1_PF_INIT_STATUS_MASK 0x1
#define HINIC_FA1_GET(val, member) \
(((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_SHIFT 16
+#define HINIC_FA2_GLOBAL_VF_ID_OF_PF_MASK 0x3FF
+
+#define HINIC_FA2_GET(val, member) \
+ (((val) >> HINIC_FA2_##member##_SHIFT) & HINIC_FA2_##member##_MASK)
+
#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
#define HINIC_FA4_DB_STATE_SHIFT 1
@@ -140,6 +150,7 @@
#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
+#define HINIC_IS_VF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_VF)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
@@ -173,6 +184,7 @@ enum hinic_pcie_tph {
enum hinic_func_type {
HINIC_PF = 0,
+ HINIC_VF = 1,
HINIC_PPF = 2,
};
@@ -180,7 +192,7 @@ enum hinic_mod_type {
HINIC_MOD_COMM = 0, /* HW communication module */
HINIC_MOD_L2NIC = 1, /* L2NIC module */
HINIC_MOD_CFGM = 7, /* Configuration module */
-
+ HINIC_MOD_HILINK = 14, /* Hilink module */
HINIC_MOD_MAX = 15
};
@@ -223,6 +235,8 @@ struct hinic_func_attr {
u8 num_ceqs;
u8 num_dma_attr;
+
+ u16 global_vf_id_of_pf;
};
struct hinic_hwif {
@@ -271,6 +285,12 @@ enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
void hinic_db_state_set(struct hinic_hwif *hwif,
enum hinic_db_state db_state);
+u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif);
+
+u16 hinic_global_func_id_hw(struct hinic_hwif *hwif);
+
+u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif);
+
int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
void hinic_free_hwif(struct hinic_hwif *hwif);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index d66f86fa3f46..3e3fa742e476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/err.h>
+#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
@@ -34,6 +35,8 @@
#define DB_IDX(db, db_base) \
(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
+#define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
+
enum io_cmd {
IO_CMD_MODIFY_QUEUE_CTXT = 0,
IO_CMD_CLEAN_QUEUE_CTXT,
@@ -279,7 +282,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
+ func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
return err;
@@ -287,7 +290,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
+ func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
if (err) {
dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
goto err_rq_alloc;
@@ -484,6 +487,33 @@ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
devm_kfree(&pdev->dev, func_to_io->qps);
}
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct hinic_wq_page_size page_size_info = {0};
+ u16 out_size = sizeof(page_size_info);
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ page_size_info.func_idx = func_idx;
+ page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
+ page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size);
+
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info,
+ sizeof(page_size_info), &page_size_info,
+ &out_size, HINIC_MGMT_MSG_SYNC);
+ if (err || !out_size || page_size_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
+ err, page_size_info.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
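HINIC_PAGE_SIZE_HW() above encodes a page size for the firmware as log2 of the number of 4K units, so SZ_4K becomes 0 and SZ_256K becomes 6. A portable sketch of that encoding:

/* Sketch only, not part of the patch: page size as log2(4K units). */
#include <stdio.h>

static unsigned char ex_page_size_hw(unsigned int pg_size)
{
	unsigned int units = pg_size >> 12;	/* count of 4K units */
	unsigned char shift = 0;

	while (units > 1) {
		units >>= 1;
		shift++;
	}
	return shift;
}

int main(void)
{
	/* 4K -> 0, 256K -> 6 */
	printf("%u %u\n", ex_page_size_hw(4096), ex_page_size_hw(262144));
	return 0;
}
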
/**
* hinic_io_init - Initialize the IO components
* @func_to_io: func to io channel that holds the IO components
@@ -506,6 +536,7 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->hwif = hwif;
func_to_io->qps = NULL;
func_to_io->max_qps = max_qps;
+ func_to_io->ceqs.hwdev = func_to_io->hwdev;
err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
@@ -541,6 +572,14 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
func_to_io->cmdq_db_area[cmdq] = db_area;
}
+ err = hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err) {
+ dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n");
+ goto init_wq_pg_size_err;
+ }
+
err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
func_to_io->cmdq_db_area);
if (err) {
@@ -551,6 +590,11 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io,
return 0;
err_init_cmdqs:
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+init_wq_pg_size_err:
err_db_area:
for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
@@ -575,6 +619,11 @@ void hinic_io_free(struct hinic_func_to_io *func_to_io)
hinic_free_cmdqs(&func_to_io->cmdqs);
+ if (!HINIC_IS_VF(func_to_io->hwif))
+ hinic_set_wq_page_size(func_to_io->hwdev,
+ HINIC_HWIF_FUNC_IDX(func_to_io->hwif),
+ HINIC_HW_WQ_PAGE_SIZE);
+
for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index cac2b722e7dc..214f162f7579 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -20,6 +20,8 @@
#define HINIC_DB_PAGE_SIZE SZ_4K
#define HINIC_DB_SIZE SZ_4M
+#define HINIC_HW_WQ_PAGE_SIZE SZ_4K
+#define HINIC_DEFAULT_WQ_PAGE_SIZE SZ_256K
#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
@@ -47,7 +49,7 @@ struct hinic_free_db_area {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
@@ -58,6 +60,9 @@ struct hinic_func_to_io {
struct hinic_qp *qps;
u16 max_qps;
+ u16 sq_depth;
+ u16 rq_depth;
+
void __iomem **sq_db;
void __iomem *db_base;
@@ -69,8 +74,27 @@ struct hinic_func_to_io {
void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
struct hinic_cmdqs cmdqs;
+
+ u16 max_vfs;
+ struct vf_data_storage *vf_infos;
+ u8 link_status;
+};
+
+struct hinic_wq_page_size {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+
+ u32 rsvd1;
};
+int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
u16 base_qpn, int num_qps,
struct msix_entry *sq_msix_entries,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
new file mode 100644
index 000000000000..bc2f87e6cb5d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "hinic_hw_if.h"
+#include "hinic_hw_mgmt.h"
+#include "hinic_hw_csr.h"
+#include "hinic_hw_dev.h"
+#include "hinic_hw_mbox.h"
+
+#define HINIC_MBOX_INT_DST_FUNC_SHIFT 0
+#define HINIC_MBOX_INT_DST_AEQN_SHIFT 10
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12
+#define HINIC_MBOX_INT_STAT_DMA_SHIFT 14
+/* The size of data to be sent (in units of 4 bytes) */
+#define HINIC_MBOX_INT_TX_SIZE_SHIFT 20
+/* SO_RO(strong order, relax order) */
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25
+#define HINIC_MBOX_INT_WB_EN_SHIFT 28
+
+#define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF
+#define HINIC_MBOX_INT_DST_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3
+#define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F
+#define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F
+#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3
+#define HINIC_MBOX_INT_WB_EN_MASK 0x1
+
+#define HINIC_MBOX_INT_SET(val, field) \
+ (((val) & HINIC_MBOX_INT_##field##_MASK) << \
+ HINIC_MBOX_INT_##field##_SHIFT)
+
+enum hinic_mbox_tx_status {
+ TX_NOT_DONE = 1,
+};
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0
+
+/* specifies the issue request for the message data.
+ * 0 - Tx request is done;
+ * 1 - Tx request is in progress.
+ */
+#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1
+
+#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1
+#define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1
+
+#define HINIC_MBOX_CTRL_SET(val, field) \
+ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
+ HINIC_MBOX_CTRL_##field##_SHIFT)
+
+#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MBOX_HEADER_MODULE_SHIFT 11
+#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MBOX_HEADER_SEQID_SHIFT 24
+#define HINIC_MBOX_HEADER_LAST_SHIFT 30
+
+/* specifies the mailbox message direction
+ * 0 - send
+ * 1 - receive
+ */
+#define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MBOX_HEADER_CMD_SHIFT 32
+#define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40
+#define HINIC_MBOX_HEADER_STATUS_SHIFT 48
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54
+
+#define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MBOX_HEADER_MODULE_MASK 0x1F
+#define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MBOX_HEADER_SEQID_MASK 0x3F
+#define HINIC_MBOX_HEADER_LAST_MASK 0x1
+#define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MBOX_HEADER_CMD_MASK 0xFF
+#define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF
+#define HINIC_MBOX_HEADER_STATUS_MASK 0x3F
+#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF
+
+#define HINIC_MBOX_HEADER_GET(val, field) \
+ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
+ HINIC_MBOX_HEADER_##field##_MASK)
+#define HINIC_MBOX_HEADER_SET(val, field) \
+ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
+ HINIC_MBOX_HEADER_##field##_SHIFT)
+
+#define MBOX_SEGLEN_MASK \
+ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)
+
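The HINIC_MBOX_HEADER_SET/GET macros above pack fields into, and extract them from, a single 64-bit mailbox header by mask and shift. A standalone sketch reproducing two of the fields, reusing the SEG_LEN and MSG_ID shift/mask values defined in this file:

/* Sketch only, not part of the patch: 64-bit header pack/unpack. */
#include <stdint.h>
#include <stdio.h>

#define EX_HDR_SEG_LEN_SHIFT 16
#define EX_HDR_SEG_LEN_MASK 0x3FULL
#define EX_HDR_MSG_ID_SHIFT 40
#define EX_HDR_MSG_ID_MASK 0xFFULL

int main(void)
{
	/* pack: mask the value, then shift it into position */
	uint64_t hdr = ((48ULL & EX_HDR_SEG_LEN_MASK) << EX_HDR_SEG_LEN_SHIFT) |
		       ((0x5AULL & EX_HDR_MSG_ID_MASK) << EX_HDR_MSG_ID_SHIFT);

	/* unpack: shift down, then mask */
	printf("seg_len=%u msg_id=0x%x\n",
	       (unsigned int)((hdr >> EX_HDR_SEG_LEN_SHIFT) & EX_HDR_SEG_LEN_MASK),
	       (unsigned int)((hdr >> EX_HDR_MSG_ID_SHIFT) & EX_HDR_MSG_ID_MASK));
	return 0;
}
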
+#define HINIC_MBOX_SEG_LEN 48
+#define HINIC_MBOX_COMP_TIME 8000U
+#define MBOX_MSG_POLLING_TIMEOUT 8000
+
+#define HINIC_MBOX_DATA_SIZE 2040
+
+#define MBOX_MAX_BUF_SZ 2048UL
+#define MBOX_HEADER_SZ 8
+
+#define MBOX_INFO_SZ 4
+
+/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16UL
+
+/* mbox write back status is 16B, only first 4B is used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define SEQ_ID_START_VAL 0
+#define SEQ_ID_MAX_VAL 42
+
+#define DST_AEQ_IDX_DEFAULT_VAL 0
+#define SRC_AEQ_IDX_DEFAULT_VAL 0
+#define NO_DMA_ATTRIBUTE_VAL 0
+
+#define HINIC_MGMT_RSP_AEQN 0
+#define HINIC_MBOX_RSP_AEQN 2
+#define HINIC_MBOX_RECV_AEQN 0
+
+#define MBOX_MSG_NO_DATA_LEN 1
+
+#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ)
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
+
+#define MBOX_RESPONSE_ERROR 0x1
+#define MBOX_MSG_ID_MASK 0xFF
+#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
+#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
+ (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)
+
+#define FUNC_ID_OFF_SET_8B 8
+#define FUNC_ID_OFF_SET_10B 10
+
+/* max number of messages waiting to be processed per function */
+#define HINIC_MAX_MSG_CNT_TO_PROCESS 10
+
+#define HINIC_QUEUE_MIN_DEPTH 6
+#define HINIC_QUEUE_MAX_DEPTH 12
+#define HINIC_MAX_RX_BUFFER_SIZE 15
+
+enum hinic_hwif_direction_type {
+ HINIC_HWIF_DIRECT_SEND = 0,
+ HINIC_HWIF_RESPONSE = 1,
+};
+
+enum mbox_send_mod {
+ MBOX_SEND_MSG_INT,
+};
+
+enum mbox_seg_type {
+ NOT_LAST_SEG,
+ LAST_SEG,
+};
+
+enum mbox_ordering_type {
+ STRONG_ORDER,
+};
+
+enum mbox_write_back_type {
+ WRITE_BACK = 1,
+};
+
+enum mbox_aeq_trig_type {
+ NOT_TRIGGER,
+ TRIGGER,
+};
+
+/**
+ * hinic_register_pf_mbox_cb - register mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->pf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_register_vf_mbox_cb - register mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ * @callback: callback function
+ * Return: 0 - success, negative - failure
+ */
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ if (mod >= HINIC_MOD_MAX)
+ return -EFAULT;
+
+ func_to_func->vf_mbox_cb[mod] = callback;
+
+ set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ return 0;
+}
+
+/**
+ * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->pf_mbox_cb[mod] = NULL;
+}
+
+/**
+ * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
+ * @hwdev: the pointer to hw device
+ * @mod: specific mod that the callback will handle
+ */
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);
+
+ while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[mod]))
+ usleep_range(900, 1000);
+
+ func_to_func->vf_mbox_cb[mod] = NULL;
+}
+
+static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ void *buf_out, u16 *out_size)
+{
+ hinic_vf_mbox_cb cb;
+ int ret = 0;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
+ cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
+ recv_mbox->mbox_len, buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_VF_MBOX_CB_RUNNING,
+ &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static int
+recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx, void *buf_out,
+ u16 *out_size)
+{
+ hinic_pf_mbox_cb cb;
+ u16 vf_id = 0;
+ int ret;
+
+ if (recv_mbox->mod >= HINIC_MOD_MAX) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
+ recv_mbox->mod);
+ return -EINVAL;
+ }
+
+ set_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
+ if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
+ vf_id = src_func_idx -
+ hinic_glb_pf_vf_offset(func_to_func->hwif);
+ ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
+ recv_mbox->mbox, recv_mbox->mbox_len,
+ buf_out, out_size);
+ } else {
+ dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
+ recv_mbox->mod);
+ ret = -EINVAL;
+ }
+
+ clear_bit(HINIC_PF_MBOX_CB_RUNNING,
+ &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);
+
+ return ret;
+}
+
+static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_mbox->seq_id = seq_id;
+ } else {
+ if (seq_id != recv_mbox->seq_id + 1)
+ return false;
+
+ recv_mbox->seq_id = seq_id;
+ }
+
+ return true;
+}
+
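check_mbox_seq_id_and_seg_len() above accepts segment 0 as the start of a new message and otherwise requires the sequence id to be exactly the previous one plus one, with the segment length bounded. A standalone sketch of that rule, with constants mirroring SEQ_ID_MAX_VAL and MBOX_SEG_LEN:

/* Sketch only, not part of the patch: per-segment sequence check. */
#include <stdbool.h>
#include <stdio.h>

#define EX_SEQ_ID_MAX 42
#define EX_SEG_LEN_MAX 48

static bool ex_check_seq(unsigned char *last_seq, unsigned char seq,
			 unsigned char seg_len)
{
	if (seq > EX_SEQ_ID_MAX || seg_len > EX_SEG_LEN_MAX)
		return false;

	/* segment 0 starts a new message; others must follow in order */
	if (seq != 0 && seq != (unsigned char)(*last_seq + 1))
		return false;

	*last_seq = seq;
	return true;
}

int main(void)
{
	unsigned char last = EX_SEQ_ID_MAX;
	int a, b, c;

	a = ex_check_seq(&last, 0, 48);	/* 1: new message */
	b = ex_check_seq(&last, 1, 48);	/* 1: in order */
	c = ex_check_seq(&last, 3, 48);	/* 0: gap detected */
	printf("%d %d %d\n", a, b, c);
	return 0;
}
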
+static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
+ func_to_func->event_flag == EVENT_START)
+ complete(&recv_mbox->recv_done);
+ else
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
+ func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
+ recv_mbox->msg_info.status);
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx);
+
+static void recv_func_mbox_work_handler(struct work_struct *work)
+{
+ struct hinic_mbox_work *mbox_work =
+ container_of(work, struct hinic_mbox_work, work);
+ struct hinic_recv_mbox *recv_mbox;
+
+ recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
+ mbox_work->src_func_idx);
+
+ recv_mbox =
+ &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];
+
+ atomic_dec(&recv_mbox->msg_cnt);
+
+ kfree(mbox_work);
+}
+
+static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ void *header, struct hinic_recv_mbox *recv_mbox)
+{
+ void *mbox_body = MBOX_BODY_FROM_HDR(header);
+ struct hinic_recv_mbox *rcv_mbox_temp = NULL;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_mbox_work *mbox_work;
+ u8 seq_id, seg_len;
+ u16 src_func_idx;
+ int pos;
+
+ seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
+ seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
+ src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
+ src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
+ HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));
+
+ if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
+ return;
+
+ recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
+ recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
+ recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
+ recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
+ recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
+ recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
+ recv_mbox->seq_id = SEQ_ID_MAX_VAL;
+
+ if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
+ HINIC_HWIF_RESPONSE) {
+ resp_mbox_handler(func_to_func, recv_mbox);
+ return;
+ }
+
+ if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "This function(%u) have %d message wait to process,can't add to work queue\n",
+ src_func_idx, atomic_read(&recv_mbox->msg_cnt));
+ return;
+ }
+
+ rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
+ if (!rcv_mbox_temp)
+ return;
+
+ rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
+ GFP_KERNEL);
+ if (!rcv_mbox_temp->mbox)
+ goto err_alloc_rcv_mbox_msg;
+
+ rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!rcv_mbox_temp->buf_out)
+ goto err_alloc_rcv_mbox_buf;
+
+ mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
+ if (!mbox_work)
+ goto err_alloc_mbox_work;
+
+ mbox_work->func_to_func = func_to_func;
+ mbox_work->recv_mbox = rcv_mbox_temp;
+ mbox_work->src_func_idx = src_func_idx;
+
+ atomic_inc(&recv_mbox->msg_cnt);
+ INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
+ queue_work(func_to_func->workq, &mbox_work->work);
+
+ return;
+
+err_alloc_mbox_work:
+ kfree(rcv_mbox_temp->buf_out);
+
+err_alloc_rcv_mbox_buf:
+ kfree(rcv_mbox_temp->mbox);
+
+err_alloc_rcv_mbox_msg:
+ kfree(rcv_mbox_temp);
+}
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u64 mbox_header = *((u64 *)header);
+ struct hinic_recv_mbox *recv_mbox;
+ u64 src, dir;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+
+ dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
+ src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+
+ if (src >= HINIC_MAX_FUNCTIONS) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mailbox source function id:%u is invalid\n", (u32)src);
+ return;
+ }
+
+ recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
+ &func_to_func->mbox_send[src] :
+ &func_to_func->mbox_resp[src];
+
+ recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
+}
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_send_mbox *send_mbox;
+
+ func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
+ send_mbox = &func_to_func->send_mbox;
+
+ complete(&send_mbox->send_done);
+}
+
+static void clear_mbox_status(struct hinic_send_mbox *mbox)
+{
+ *mbox->wb_status = 0;
+
+ /* make the cleared write-back status visible before the next send */
+ wmb();
+}
+
+static void mbox_copy_header(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, u64 *header)
+{
+ u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
+ u32 *data = (u32 *)header;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i), mbox->data + i * sizeof(u32));
+}
+
+static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
+ struct hinic_send_mbox *mbox, void *seg,
+ u16 seg_len)
+{
+ u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
+ u32 data_len, chk_sz = sizeof(u32);
+ u32 *data = seg;
+ u32 i, idx_max;
+
+ /* The mbox message must be 4-byte aligned; bounce-buffer unaligned tails. */
+ if (seg_len % chk_sz) {
+ memcpy(mbox_max_buf, seg, seg_len);
+ data = (u32 *)mbox_max_buf;
+ }
+
+ data_len = seg_len;
+ idx_max = ALIGN(data_len, chk_sz) / chk_sz;
+
+ for (i = 0; i < idx_max; i++)
+ __raw_writel(*(data + i),
+ mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
+}
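The bounce buffer above exists because the mailbox window only tolerates whole 32-bit MMIO stores: a tail segment whose length is not a multiple of 4 is first copied into a zeroed MBOX_SEG_LEN buffer and then written out word by word. The word count is ALIGN(seg_len, 4) / 4, so a 10-byte segment, for example, is emitted as 3 words with the last one zero-padded. A hedged one-liner capturing that arithmetic (ALIGN as in linux/kernel.h):

/* hypothetical helper mirroring the word-count math in mbox_copy_send_data() */
static inline u32 mbox_word_writes(u16 seg_len)
{
	return ALIGN(seg_len, sizeof(u32)) / sizeof(u32);	/* 10 -> 3 */
}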
+
+static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
+ u16 dst_func, u16 dst_aeqn, u16 seg_len,
+ int poll)
+{
+ u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
+ u32 mbox_int, mbox_ctrl;
+
+ mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
+ HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
+ HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
+ HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
+ MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
+ TX_SIZE) |
+ HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
+ HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);
+
+ wmb(); /* writing the mbox int attributes */
+ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);
+
+ if (poll)
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
+ else
+ mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);
+
+ hinic_hwif_write_reg(func_to_func->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
+}
+
+static void dump_mbox_reg(struct hinic_hwdev *hwdev)
+{
+ u32 val;
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);
+
+ val = hinic_hwif_read_reg(hwdev->hwif,
+ HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
+ dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
+ val);
+}
+
+static u16 get_mbox_status(struct hinic_send_mbox *mbox)
+{
+ /* the write-back area is 16 bytes, but only the first 4 are used */
+ u64 wb_val = be64_to_cpu(*mbox->wb_status);
+
+ rmb(); /* ensure the status read completes before it is checked */
+
+ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
+}
+
+static int
+wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
+ int poll, u16 *wb_status)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u32 cnt = 0;
+ unsigned long jif;
+
+ if (poll) {
+ while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
+ *wb_status = get_mbox_status(send_mbox);
+ if (MBOX_STATUS_FINISHED(*wb_status))
+ break;
+
+ usleep_range(900, 1000);
+ cnt++;
+ }
+
+ if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ *wb_status);
+ dump_mbox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+ } else {
+ jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(done, jif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
+ dump_mbox_reg(hwdev);
+ return -ETIMEDOUT;
+ }
+
+ *wb_status = get_mbox_status(send_mbox);
+ }
+
+ return 0;
+}
+
+static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
+ u64 header, u16 dst_func, void *seg, u16 seg_len,
+ int poll, void *msg_info)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ struct completion *done = &send_mbox->send_done;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ u16 dst_aeqn, wb_status = 0, errcode;
+
+ if (num_aeqs >= 4)
+ dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
+ HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
+ else
+ dst_aeqn = 0;
+
+ if (!poll)
+ init_completion(done);
+
+ clear_mbox_status(send_mbox);
+
+ mbox_copy_header(hwdev, send_mbox, &header);
+
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+
+ write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);
+
+ wmb(); /* writing the mbox msg attributes */
+
+ if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
+ return -ETIMEDOUT;
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, void *msg,
+ u16 msg_len, u16 dst_func,
+ enum hinic_hwif_direction_type direction,
+ enum hinic_mbox_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u16 seg_len = MBOX_SEG_LEN;
+ u8 *msg_seg = (u8 *)msg;
+ u16 left = msg_len;
+ u32 seq_id = 0;
+ u64 header = 0;
+ int err = 0;
+
+ down(&func_to_func->msg_send_sem);
+
+ header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MBOX_HEADER_SET(mod, MODULE) |
+ HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
+ HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
+ HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
+ HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
+ HINIC_MBOX_HEADER_SET(cmd, CMD) |
+ /* The VF's offset to its associated PF */
+ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
+ HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
+ SRC_GLB_FUNC_IDX);
+
+ while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
+ if (left <= HINIC_MBOX_SEG_LEN) {
+ header &= ~MBOX_SEGLEN_MASK;
+ header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
+ header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);
+
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
+ seg_len, MBOX_SEND_MSG_INT, msg_info);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ HINIC_MBOX_HEADER_GET(header, SEQID));
+ goto err_send_mbox_seg;
+ }
+
+ left -= HINIC_MBOX_SEG_LEN;
+ msg_seg += HINIC_MBOX_SEG_LEN;
+
+ seq_id++;
+ header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
+ SEQID));
+ header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
+ }
+
+err_send_mbox_seg:
+ up(&func_to_func->msg_send_sem);
+
+ return err;
+}
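The loop above builds the 64-bit header once with SEQID 0 and LAST clear, then keeps sending MBOX_SEG_LEN-sized chunks, patching SEG_LEN and setting LAST on the final chunk and bumping SEQID after each send. As an illustration, assuming a 48-byte segment payload, a 100-byte message goes out as three segments of 48, 48, and 4 bytes with SEQIDs 0, 1, 2 and LAST set on the third. A hedged user-space model of the split:

#include <stdio.h>

#define SEG_LEN 48	/* hypothetical stand-in for MBOX_SEG_LEN */

static void show_segments(unsigned int msg_len)
{
	unsigned int left = msg_len, seq_id = 0;

	/* note: the driver still sends one empty LAST segment when
	 * msg_len == 0; this model only illustrates the non-empty case
	 */
	while (left) {
		unsigned int seg = left < SEG_LEN ? left : SEG_LEN;

		printf("seq %u: %u bytes%s\n", seq_id, seg,
		       left <= SEG_LEN ? " (LAST)" : "");
		left -= seg;
		seq_id++;
	}
}

int main(void)
{
	show_segments(100);	/* prints 48, 48, 4 (LAST) */
	return 0;
}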
+
+static void
+response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox, int err,
+ u16 out_size, u16 src_func_idx)
+{
+ struct mbox_msg_info msg_info = {0};
+
+ if (recv_mbox->ack_type == MBOX_ACK) {
+ msg_info.msg_id = recv_mbox->msg_info.msg_id;
+ if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
+ else if (err == HINIC_MBOX_VF_CMD_ERROR)
+ msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
+ else if (err)
+ msg_info.status = HINIC_MBOX_PF_SEND_ERR;
+
+ /* if no response data is needed, set out_size to 1 */
+ if (!out_size || err)
+ out_size = MBOX_MSG_NO_DATA_LEN;
+
+ send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
+ recv_mbox->buf_out, out_size, src_func_idx,
+ HINIC_HWIF_RESPONSE, MBOX_ACK,
+ &msg_info);
+ }
+}
+
+static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *recv_mbox,
+ u16 src_func_idx)
+{
+ void *buf_out = recv_mbox->buf_out;
+ u16 out_size = MBOX_MAX_BUF_SZ;
+ int err = 0;
+
+ if (HINIC_IS_VF(func_to_func->hwif)) {
+ err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
+ &out_size);
+ } else {
+ if (IS_PF_OR_PPF_SRC(src_func_idx))
+ dev_warn(&func_to_func->hwif->pdev->dev,
+ "Unsupported pf2pf mbox msg\n");
+ else
+ err = recv_pf_from_vf_mbox_handler(func_to_func,
+ recv_mbox,
+ src_func_idx,
+ buf_out, &out_size);
+ }
+
+ response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
+ src_func_idx);
+ kfree(recv_mbox->buf_out);
+ kfree(recv_mbox->mbox);
+ kfree(recv_mbox);
+}
+
+static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&func_to_func->mbox_lock);
+ func_to_func->event_flag = event_flag;
+ spin_unlock(&func_to_func->mbox_lock);
+}
+
+static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
+ struct hinic_recv_mbox *mbox_for_resp,
+ enum hinic_mod_type mod, u16 cmd,
+ void *buf_out, u16 *out_size)
+{
+ int err;
+
+ if (mbox_for_resp->msg_info.status) {
+ err = mbox_for_resp->msg_info.status;
+ if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
+ mbox_for_resp->msg_info.status);
+ return err;
+ }
+
+ if (buf_out && out_size) {
+ if (*out_size < mbox_for_resp->mbox_len) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n",
+ mbox_for_resp->mbox_len, mod, cmd, *out_size);
+ return -EFAULT;
+ }
+
+ if (mbox_for_resp->mbox_len)
+ memcpy(buf_out, mbox_for_resp->mbox,
+ mbox_for_resp->mbox_len);
+
+ *out_size = mbox_for_resp->mbox_len;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_recv_mbox *mbox_for_resp;
+ struct mbox_msg_info msg_info = {0};
+ unsigned long timeo;
+ int err;
+
+ mbox_for_resp = &func_to_func->mbox_resp[dst_func];
+
+ down(&func_to_func->mbox_send_sem);
+
+ init_completion(&mbox_for_resp->recv_done);
+
+ msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);
+
+ set_mbox_to_func_event(func_to_func, EVENT_START);
+
+ err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
+ dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
+ &msg_info);
+ if (err) {
+ dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
+ msg_info.msg_id);
+ set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+ goto err_send_mbox;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
+ if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
+ set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send_mbox;
+ }
+
+ set_mbox_to_func_event(func_to_func, EVENT_END);
+
+ err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
+ buf_out, out_size);
+
+err_send_mbox:
+ up(&func_to_func->mbox_send_sem);
+
+ return err;
+}
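A hypothetical synchronous caller of hinic_mbox_to_func(), to show the calling convention: buf_out is caller-allocated, *out_size goes in as the buffer capacity and comes back as the actual response length, and a timeout of 0 selects the default HINIC_MBOX_COMP_TIME. The command struct here is illustrative, not a real driver definition:

/* hypothetical request/response exchange over the mailbox */
struct my_query_cmd {	/* illustrative wire format */
	u8 status;
	u8 version;
	u8 rsvd0[6];
	u16 func_id;
	u16 rsvd1;
};

static int example_query(struct hinic_mbox_func_to_func *func_to_func,
			 u16 dst_func, u8 cmd /* a module command id */)
{
	struct my_query_cmd buf = { 0 };
	u16 out_size = sizeof(buf);
	int err;

	err = hinic_mbox_to_func(func_to_func, HINIC_MOD_L2NIC, cmd,
				 dst_func, &buf, sizeof(buf),
				 &buf, &out_size, 0 /* default timeout */);
	if (err)
		return err;

	return buf.status ? -EIO : 0;
}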
+
+static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
+ void *buf_in, u16 in_size)
+{
+ if (in_size > HINIC_MBOX_DATA_SIZE) {
+ dev_err(&func_to_func->hwif->pdev->dev,
+ "Mbox msg len(%d) exceed limit(%d)\n",
+ in_size, HINIC_MBOX_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+ int err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+
+ if (err)
+ return err;
+
+ if (!HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd,
+ hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ u16 dst_func_idx;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ func_to_func = hwdev->func_to_func;
+ err = mbox_func_params_valid(func_to_func, buf_in, in_size);
+ if (err)
+ return err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
+ HINIC_FUNC_TYPE(hwdev->hwif));
+ return -EINVAL;
+ }
+
+ if (!vf_id) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "VF id(%d) error!\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* The VF's global function id is this PF's VF offset
+ * (hinic_glb_pf_vf_offset()) plus the VF id
+ */
+ dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
+ in_size, buf_out, out_size, timeout);
+}
+
+static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ int err;
+
+ mbox_info->seq_id = SEQ_ID_MAX_VAL;
+
+ mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->mbox)
+ return -ENOMEM;
+
+ mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!mbox_info->buf_out) {
+ err = -ENOMEM;
+ goto err_alloc_buf_out;
+ }
+
+ atomic_set(&mbox_info->msg_cnt, 0);
+
+ return 0;
+
+err_alloc_buf_out:
+ kfree(mbox_info->mbox);
+
+ return err;
+}
+
+static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ kfree(mbox_info->buf_out);
+ kfree(mbox_info->mbox);
+}
+
+static int alloc_mbox_info(struct hinic_hwdev *hwdev,
+ struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx, i;
+ int err;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
+ err = init_mbox_info(&mbox_info[func_idx]);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
+ func_idx);
+ goto err_init_mbox_info;
+ }
+ }
+
+ return 0;
+
+err_init_mbox_info:
+ for (i = 0; i < func_idx; i++)
+ clean_mbox_info(&mbox_info[i]);
+
+ return err;
+}
+
+static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
+{
+ u16 func_idx;
+
+ for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
+ clean_mbox_info(&mbox_info[func_idx]);
+}
+
+static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+
+ send_mbox->data = MBOX_AREA(func_to_func->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ send_mbox->wb_status = send_mbox->wb_vaddr;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
+{
+ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
+ struct hinic_hwdev *hwdev = func_to_func->hwdev;
+
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr,
+ send_mbox->wb_paddr);
+}
+
+static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hwdev = handle;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+ if (cmd == HINIC_COMM_CMD_START_FLR) {
+ *out_size = 0;
+ } else {
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+ cmd, buf_in, in_size, buf_out, out_size,
+ HINIC_MGMT_MSG_SYNC);
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "PF mbox common callback handler err: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_pfhwdev *pfhwdev;
+ int err;
+
+ pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+ func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
+ if (!func_to_func)
+ return -ENOMEM;
+
+ hwdev->func_to_func = func_to_func;
+ func_to_func->hwdev = hwdev;
+ func_to_func->hwif = hwdev->hwif;
+ sema_init(&func_to_func->mbox_send_sem, 1);
+ sema_init(&func_to_func->msg_send_sem, 1);
+ spin_lock_init(&func_to_func->mbox_lock);
+ func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
+ if (!func_to_func->workq) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
+ err = -ENOMEM;
+ goto err_create_mbox_workq;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
+ goto err_alloc_mbox_for_send;
+ }
+
+ err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
+ goto err_alloc_mbox_for_resp;
+ }
+
+ err = alloc_mbox_wb_status(func_to_func);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_alloc_wb_status;
+ }
+
+ prepare_send_mbox(func_to_func);
+
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
+ &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
+ hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
+ &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
+ comm_pf_mbox_handler);
+
+ return 0;
+
+err_alloc_wb_status:
+ free_mbox_info(func_to_func->mbox_resp);
+
+err_alloc_mbox_for_resp:
+ free_mbox_info(func_to_func->mbox_send);
+
+err_alloc_mbox_for_send:
+ destroy_workqueue(func_to_func->workq);
+
+err_create_mbox_workq:
+ kfree(func_to_func);
+
+ return err;
+}
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
+
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
+ hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);
+
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
+ /* destroy the workqueue before freeing the related mailbox
+ * resources to prevent illegal access to freed memory
+ */
+ destroy_workqueue(func_to_func->workq);
+
+ free_mbox_wb_status(func_to_func);
+ free_mbox_info(func_to_func->mbox_resp);
+ free_mbox_info(func_to_func->mbox_send);
+
+ kfree(func_to_func);
+}
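For orientation, the expected lifecycle of this file as a whole, sketched with hypothetical caller names and error handling trimmed: hinic_func_to_func_init() wires up the AEQ callbacks and work queue, after which the send paths may be used, and hinic_func_to_func_free() tears everything down in reverse order.

/* hypothetical probe/remove pairing for the mailbox subsystem */
static int example_mbox_probe(struct hinic_hwdev *hwdev)
{
	int err;

	err = hinic_func_to_func_init(hwdev);
	if (err)
		return err;

	/* ... hinic_mbox_to_pf() / hinic_mbox_to_vf() traffic here ... */
	return 0;
}

static void example_mbox_remove(struct hinic_hwdev *hwdev)
{
	hinic_func_to_func_free(hwdev);
}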
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
new file mode 100644
index 000000000000..7b18559bfe80
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_MBOX_H_
+#define HINIC_MBOX_H_
+
+#define HINIC_MBOX_PF_SEND_ERR 0x1
+#define HINIC_MBOX_PF_BUSY_ACTIVE_FW 0x2
+#define HINIC_MBOX_VF_CMD_ERROR 0x3
+
+#define HINIC_MAX_FUNCTIONS 512
+
+#define HINIC_MAX_PF_FUNCS 16
+
+#define HINIC_MBOX_WQ_NAME "hinic_mbox"
+
+#define HINIC_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF 0x0100
+#define HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF 0x0104
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108
+#define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C
+
+enum hinic_mbox_ack_type {
+ MBOX_ACK,
+ MBOX_NO_ACK,
+};
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic_recv_mbox {
+ struct completion recv_done;
+ void *mbox;
+ u8 cmd;
+ enum hinic_mod_type mod;
+ u16 mbox_len;
+ void *buf_out;
+ enum hinic_mbox_ack_type ack_type;
+ struct mbox_msg_info msg_info;
+ u8 seq_id;
+ atomic_t msg_cnt;
+};
+
+struct hinic_send_mbox {
+ struct completion send_done;
+ u8 *data;
+
+ u64 *wb_status;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+typedef void (*hinic_vf_mbox_cb)(void *handle, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
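The two typedefs split the callback roles by direction: a VF registers hinic_vf_mbox_cb handlers for messages arriving from its PF, while a PF registers hinic_pf_mbox_cb handlers, keyed by module, for messages from its VFs (comm_pf_mbox_handler in hinic_hw_mbox.c is a real instance of the latter). A hedged registration sketch with a hypothetical handler:

/* hypothetical PF-side handler for one module, and its registration */
static int my_pf_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
			 u16 in_size, void *buf_out, u16 *out_size)
{
	/* validate cmd, act on buf_in, fill buf_out, set *out_size */
	*out_size = 0;
	return 0;
}

static int my_register(struct hinic_hwdev *hwdev)
{
	return hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
					 my_pf_handler);
}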
+
+enum mbox_event_state {
+ EVENT_START = 0,
+ EVENT_FAIL,
+ EVENT_TIMEOUT,
+ EVENT_END,
+};
+
+enum hinic_mbox_cb_state {
+ HINIC_VF_MBOX_CB_REG = 0,
+ HINIC_VF_MBOX_CB_RUNNING,
+ HINIC_PF_MBOX_CB_REG,
+ HINIC_PF_MBOX_CB_RUNNING,
+ HINIC_PPF_MBOX_CB_REG,
+ HINIC_PPF_MBOX_CB_RUNNING,
+ HINIC_PPF_TO_PF_MBOX_CB_REG,
+ HINIC_PPF_TO_PF_MBOX_CB_RUNNING,
+};
+
+struct hinic_mbox_func_to_func {
+ struct hinic_hwdev *hwdev;
+ struct hinic_hwif *hwif;
+
+ struct semaphore mbox_send_sem;
+ struct semaphore msg_send_sem;
+ struct hinic_send_mbox send_mbox;
+
+ struct workqueue_struct *workq;
+
+ struct hinic_recv_mbox mbox_resp[HINIC_MAX_FUNCTIONS];
+ struct hinic_recv_mbox mbox_send[HINIC_MAX_FUNCTIONS];
+
+ hinic_vf_mbox_cb vf_mbox_cb[HINIC_MOD_MAX];
+ hinic_pf_mbox_cb pf_mbox_cb[HINIC_MOD_MAX];
+ unsigned long pf_mbox_cb_state[HINIC_MOD_MAX];
+ unsigned long vf_mbox_cb_state[HINIC_MOD_MAX];
+
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+struct hinic_mbox_work {
+ struct work_struct work;
+ u16 src_func_idx;
+ struct hinic_mbox_func_to_func *func_to_func;
+ struct hinic_recv_mbox *recv_mbox;
+};
+
+struct vf_cmd_msg_handle {
+ u8 cmd;
+ int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size);
+};
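vf_cmd_msg_handle pairs a command ID with its handler, which suggests table-driven dispatch on the PF side: build an array of these and walk it on each received cmd. A hedged sketch of such a lookup (the command ID and handler body are illustrative):

/* hypothetical dispatch over a vf_cmd_msg_handle table */
static int my_noop_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size,
			   void *buf_out, u16 *out_size)
{
	*out_size = 0;
	return 0;
}

static const struct vf_cmd_msg_handle my_vf_cmd_handles[] = {
	{ .cmd = 0x1 /* illustrative cmd id */,
	  .cmd_msg_handler = my_noop_handler },
};

static int dispatch_vf_cmd(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
			   u16 in_size, void *buf_out, u16 *out_size)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(my_vf_cmd_handles); i++)
		if (my_vf_cmd_handles[i].cmd == cmd)
			return my_vf_cmd_handles[i].cmd_msg_handler(hwdev,
					vf_id, buf_in, in_size,
					buf_out, out_size);

	return -EINVAL;
}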
+
+int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_pf_mbox_cb callback);
+
+int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod,
+ hinic_vf_mbox_cb callback);
+
+void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod);
+
+void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
+
+void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
+
+int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod,
+ u8 cmd, void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
+ enum hinic_mod_type mod, u16 cmd, u16 dst_func,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
+ enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 992908e6eebf..c33eb1147055 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -361,7 +361,11 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ if (HINIC_IS_VF(hwif))
+ return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
+ in_size, buf_out, out_size, 0);
+ else
+ return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP, timeout);
}
@@ -398,8 +402,8 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
recv_msg->msg, recv_msg->msg_len,
buf_out, &out_size);
else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
+ dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
+ recv_msg->mod, recv_msg->cmd);
mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
@@ -561,6 +565,10 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
int err;
pf_to_mgmt->hwif = hwif;
+ pf_to_mgmt->hwdev = hwdev;
+
+ if (HINIC_IS_VF(hwif))
+ return 0;
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
pf_to_mgmt->sync_msg_id = 0;
@@ -592,6 +600,9 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ if (HINIC_IS_VF(hwdev->hwif))
+ return;
+
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index 182fba17b643..c2b142c08b0e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -60,7 +60,9 @@ enum hinic_cfg_cmd {
};
enum hinic_comm_cmd {
+ HINIC_COMM_CMD_START_FLR = 0x1,
HINIC_COMM_CMD_IO_STATUS_GET = 0x3,
+ HINIC_COMM_CMD_DMA_ATTR_SET = 0x4,
HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10,
HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11,
@@ -74,7 +76,13 @@ enum hinic_comm_cmd {
HINIC_COMM_CMD_IO_RES_CLEAR = 0x29,
- HINIC_COMM_CMD_MAX = 0x32,
+ HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
+
+ HINIC_COMM_CMD_L2NIC_RESET = 0x4b,
+
+ HINIC_COMM_CMD_PAGESIZE_SET = 0x50,
+
+ HINIC_COMM_CMD_MAX = 0x51,
};
enum hinic_mgmt_cb_state {
@@ -107,7 +115,7 @@ struct hinic_mgmt_cb {
struct hinic_pf_to_mgmt {
struct hinic_hwif *hwif;
-
+ struct hinic_hwdev *hwdev;
struct semaphore sync_msg_lock;
u16 sync_msg_id;
u8 *sync_msg_buf;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index be364b7a7019..fcf7bfe4aa47 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -108,7 +108,12 @@ void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+ /* If only one page, use 0-level CLA */
+ if (wq->num_q_pages == 1)
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr);
+ else
+ wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
+
wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
@@ -638,6 +643,7 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
/* increment prod_idx to the next */
prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ prod_idx = SQ_MASKED_IDX(sq, prod_idx);
wmb(); /* Write all before the doorbell */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 79091e131418..ca3e2d060284 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -38,12 +38,15 @@
#define HINIC_SQ_WQEBB_SIZE 64
#define HINIC_RQ_WQEBB_SIZE 32
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
+#define HINIC_SQ_PAGE_SIZE SZ_256K
+#define HINIC_RQ_PAGE_SIZE SZ_256K
#define HINIC_SQ_DEPTH SZ_4K
#define HINIC_RQ_DEPTH SZ_4K
+#define HINIC_MAX_QUEUE_DEPTH SZ_4K
+#define HINIC_MIN_QUEUE_DEPTH 128
+
/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
#define HINIC_RX_BUF_SZ 2048
#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
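The new depth bounds feed ring-size validation (e.g. from ethtool): a requested depth has to fall within [HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH], and because WQ indexing relies on power-of-two masks it is also rounded down to a power of two. A hedged sketch of that clamp-and-round step, using the kernel's clamp_t() and rounddown_pow_of_two():

/* hypothetical normalization of a user-requested ring depth */
static u16 normalize_queue_depth(u32 requested)
{
	u32 depth = clamp_t(u32, requested, HINIC_MIN_QUEUE_DEPTH,
			    HINIC_MAX_QUEUE_DEPTH);

	/* round down so the ring's index mask (depth - 1) stays valid */
	return (u16)rounddown_pow_of_two(depth);
}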
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 03363216ff59..5dc3743f8091 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -503,7 +503,7 @@ err_alloc_wq_pages:
* Return 0 - Success, negative - Failure
**/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size)
{
struct hinic_hwif *hwif = wqs->hwif;
@@ -600,7 +600,7 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
**/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
@@ -768,7 +768,10 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = curr_prod_idx;
- if (curr_pg != end_pg) {
+ /* Even with a single page, we still need the shadow WQE when a
+ * WQE rolls over the end of the page
+ */
+ if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 811eef744140..b06f8c0255de 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -26,7 +26,7 @@ struct hinic_wq {
int block_idx;
u16 wqebb_size;
- u16 wq_page_size;
+ u32 wq_page_size;
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
@@ -76,7 +76,7 @@ struct hinic_cmdq_pages {
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
+ int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size);
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
@@ -88,7 +88,7 @@ int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
void hinic_wqs_free(struct hinic_wqs *wqs);
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
+ u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size);
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 63b92f6cc856..e9e6f4c9309a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -29,6 +29,7 @@
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
+#include "hinic_sriov.h"
MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
@@ -46,6 +47,7 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200
#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205
#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210
+#define HINIC_DEV_ID_VF 0x375e
#define HINIC_WQ_NAME "hinic_dev"
@@ -65,6 +67,8 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define rx_mode_work_to_nic_dev(rx_mode_work) \
container_of(rx_mode_work, struct hinic_dev, rx_mode_work)
+#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
+
static int change_mac_addr(struct net_device *netdev, const u8 *addr);
static int set_features(struct hinic_dev *nic_dev,
@@ -322,7 +326,6 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
int i, node, err = 0;
u16 num_cpus = 0;
- nic_dev->max_qps = hinic_hwdev_max_num_qps(hwdev);
if (nic_dev->max_qps <= 1) {
nic_dev->flags &= ~HINIC_RSS_ENABLE;
nic_dev->rss_limit = nic_dev->max_qps;
@@ -368,14 +371,15 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}
-static int hinic_open(struct net_device *netdev)
+int hinic_open(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
enum hinic_port_link_state link_state;
int err, ret;
if (!(nic_dev->flags & HINIC_INTF_UP)) {
- err = hinic_hwdev_ifup(nic_dev->hwdev);
+ err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
+ nic_dev->rq_depth);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed - HW interface up\n");
@@ -423,9 +427,6 @@ static int hinic_open(struct net_device *netdev)
goto err_func_port_state;
}
- /* Wait up to 3 sec between port enable to link state */
- msleep(3000);
-
down(&nic_dev->mgmt_lock);
err = hinic_port_link_state(nic_dev, &link_state);
@@ -434,6 +435,9 @@ static int hinic_open(struct net_device *netdev)
goto err_port_link;
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);
+
if (link_state == HINIC_LINK_STATE_UP)
nic_dev->flags |= HINIC_LINK_UP;
@@ -479,7 +483,7 @@ err_create_txqs:
return err;
}
-static int hinic_close(struct net_device *netdev)
+int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
@@ -496,6 +500,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);
+
hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
@@ -673,7 +680,7 @@ static int hinic_vlan_rx_add_vid(struct net_device *netdev,
}
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
goto err_add_mac;
}
@@ -725,8 +732,6 @@ static void set_rx_mode(struct work_struct *work)
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
- netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
-
hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
@@ -745,10 +750,12 @@ static void hinic_set_rx_mode(struct net_device *netdev)
HINIC_RX_MODE_MC |
HINIC_RX_MODE_BC;
- if (netdev->flags & IFF_PROMISC)
- rx_mode |= HINIC_RX_MODE_PROMISC;
- else if (netdev->flags & IFF_ALLMULTI)
+ if (netdev->flags & IFF_PROMISC) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ rx_mode |= HINIC_RX_MODE_PROMISC;
+ } else if (netdev->flags & IFF_ALLMULTI) {
rx_mode |= HINIC_RX_MODE_MC_ALL;
+ }
rx_mode_work->rx_mode = rx_mode;
@@ -758,8 +765,26 @@ static void hinic_set_rx_mode(struct net_device *netdev)
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 sw_pi, hw_ci, sw_ci;
+ struct hinic_sq *sq;
+ u16 num_sqs, q_id;
+
+ num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);
netif_err(nic_dev, drv, netdev, "Tx timeout\n");
+
+ for (q_id = 0; q_id < num_sqs; q_id++) {
+ if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
+ continue;
+
+ sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
+ sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
+ hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
+ sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
+ netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
+ q_id, sw_pi, hw_ci, sw_ci,
+ nic_dev->txqs[q_id].napi.state);
+ }
}
static void hinic_get_stats64(struct net_device *netdev,
@@ -825,6 +850,29 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_get_stats64 = hinic_get_stats64,
.ndo_fix_features = hinic_fix_features,
.ndo_set_features = hinic_set_features,
+ .ndo_set_vf_mac = hinic_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
+ .ndo_get_vf_config = hinic_ndo_get_vf_config,
+ .ndo_set_vf_trust = hinic_ndo_set_vf_trust,
+ .ndo_set_vf_rate = hinic_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
+};
+
+static const struct net_device_ops hinicvf_netdev_ops = {
+ .ndo_open = hinic_open,
+ .ndo_stop = hinic_close,
+ .ndo_change_mtu = hinic_change_mtu,
+ .ndo_set_mac_address = hinic_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = hinic_set_rx_mode,
+ .ndo_start_xmit = hinic_xmit_frame,
+ .ndo_tx_timeout = hinic_tx_timeout,
+ .ndo_get_stats64 = hinic_get_stats64,
+ .ndo_fix_features = hinic_fix_features,
+ .ndo_set_features = hinic_set_features,
};
static void netdev_features_init(struct net_device *netdev)
@@ -884,6 +932,10 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
}
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
+ link_status->link);
+
ret_link_status = buf_out;
ret_link_status->status = 0;
@@ -957,7 +1009,12 @@ static int nic_dev_init(struct pci_dev *pdev)
}
hinic_set_ethtool_ops(netdev);
- netdev->netdev_ops = &hinic_netdev_ops;
+
+ if (!HINIC_IS_VF(hwdev->hwif))
+ netdev->netdev_ops = &hinic_netdev_ops;
+ else
+ netdev->netdev_ops = &hinicvf_netdev_ops;
+
netdev->max_mtu = ETH_MAX_MTU;
nic_dev = netdev_priv(netdev);
@@ -969,6 +1026,11 @@ static int nic_dev_init(struct pci_dev *pdev)
nic_dev->rxqs = NULL;
nic_dev->tx_weight = tx_weight;
nic_dev->rx_weight = rx_weight;
+ nic_dev->sq_depth = HINIC_SQ_DEPTH;
+ nic_dev->rq_depth = HINIC_RQ_DEPTH;
+ nic_dev->sriov_info.hwdev = hwdev;
+ nic_dev->sriov_info.pdev = pdev;
+ nic_dev->max_qps = num_qps;
sema_init(&nic_dev->mgmt_lock, 1);
@@ -995,11 +1057,25 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
- if (err)
- dev_warn(&pdev->dev, "Failed to get mac address\n");
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto err_get_mac;
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ dev_err(&pdev->dev, "Invalid MAC address\n");
+ err = -EIO;
+ goto err_add_mac;
+ }
+
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+ netdev->dev_addr);
+ eth_hw_addr_random(netdev);
+ }
err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
- if (err) {
+ if (err && err != HINIC_PF_SET_VF_ALREADY) {
dev_err(&pdev->dev, "Failed to add mac\n");
goto err_add_mac;
}
@@ -1041,6 +1117,7 @@ err_set_features:
cancel_work_sync(&rx_mode_work->work);
err_set_mtu:
+err_get_mac:
err_add_mac:
pci_set_drvdata(pdev, NULL);
destroy_workqueue(nic_dev->workq);
@@ -1114,14 +1191,41 @@ err_pci_regions:
return err;
}
+
+static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
+{
+ struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
+ u32 loop_cnt = 0;
+
+ set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
+ usleep_range(9900, 10000);
+
+ while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
+ if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
+ !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
+ return;
+
+ usleep_range(9900, 10000);
+ loop_cnt++;
+ }
+}
+
static void hinic_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rx_mode_work *rx_mode_work;
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
+ wait_sriov_cfg_complete(nic_dev);
+ hinic_pci_sriov_disable(pdev);
+ }
+
unregister_netdev(netdev);
+ hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
+
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
@@ -1132,6 +1236,8 @@ static void hinic_remove(struct pci_dev *pdev)
destroy_workqueue(nic_dev->workq);
+ hinic_vf_func_free(nic_dev->hwdev);
+
hinic_free_hwdev(nic_dev->hwdev);
free_netdev(netdev);
@@ -1152,6 +1258,7 @@ static const struct pci_device_id hinic_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
+ { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);
@@ -1162,6 +1269,7 @@ static struct pci_driver hinic_driver = {
.probe = hinic_probe,
.remove = hinic_remove,
.shutdown = hinic_shutdown,
+ .sriov_configure = hinic_pci_sriov_configure,
};
module_pci_driver(hinic_driver);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 1e389a004e50..175c0ee00038 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -37,20 +37,14 @@ enum mac_op {
static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id, enum mac_op op)
{
- struct net_device *netdev = nic_dev->netdev;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
enum hinic_port_cmd cmd;
- u16 out_size;
int err;
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
if (op == MAC_SET)
cmd = HINIC_PORT_CMD_SET_MAC;
else
@@ -63,12 +57,25 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
sizeof(port_mac_cmd),
&port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
+ if (err || out_size != sizeof(port_mac_cmd) ||
+ (port_mac_cmd.status &&
+ port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY &&
+ port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) {
dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
port_mac_cmd.status);
return -EFAULT;
}
+ if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n",
+ (op == MAC_SET) ? "set" : "del");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status ==
+ HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n");
+
return 0;
}
@@ -112,8 +119,8 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mac_cmd port_mac_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mac_cmd);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -144,9 +151,9 @@ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mtu_cmd port_mtu_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(port_mtu_cmd);
struct pci_dev *pdev = hwif->pdev;
int err, max_frame;
- u16 out_size;
if (new_mtu < HINIC_MIN_MTU_SIZE) {
netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
@@ -248,14 +255,9 @@ int hinic_port_link_state(struct hinic_dev *nic_dev,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_port_link_cmd link_cmd;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(link_cmd);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
@@ -284,13 +286,11 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
struct hinic_port_state_cmd port_state;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(port_state);
int err;
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
+ if (HINIC_IS_VF(hwdev->hwif))
+ return 0;
port_state.state = state;
@@ -320,7 +320,7 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(func_state);
int err;
func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -351,7 +351,7 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(*port_cap);
int err;
port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
@@ -382,7 +382,7 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_tso_config tso_cfg = {0};
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(tso_cfg);
int err;
tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -405,9 +405,9 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
{
struct hinic_checksum_offload rx_csum_cfg = {0};
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(rx_csum_cfg);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -443,6 +443,7 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
if (!hwdev)
return -EINVAL;
+ out_size = sizeof(vlan_cfg);
hwif = hwdev->hwif;
pdev = hwif->pdev;
vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -465,14 +466,14 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
struct hinic_rq_num rq_num = { 0 };
+ struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(rq_num);
int err;
rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif);
rq_num.num_rqs = num_rqs;
- rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
+ rq_num.rq_depth = ilog2(nic_dev->rq_depth);
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP,
&rq_num, sizeof(rq_num),
@@ -491,8 +492,8 @@ static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en,
u8 max_wqe_num)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_lro_config lro_cfg = { 0 };
+ struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 out_size = sizeof(lro_cfg);
int err;
@@ -568,6 +569,9 @@ int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en,
if (err)
return err;
+ if (HINIC_IS_VF(nic_dev->hwdev->hwif))
+ return 0;
+
err = hinic_set_rx_lro_timer(nic_dev, lro_timer);
if (err)
return err;
@@ -741,9 +745,9 @@ int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_context_table ctx_tbl = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(ctx_tbl);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(ctx_tbl);
int err;
if (!hwdev || !rss_type)
@@ -784,7 +788,7 @@ int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id,
struct hinic_hwif *hwif = hwdev->hwif;
struct hinic_rss_key rss_key = { 0 };
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_key);
int err;
rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -809,9 +813,9 @@ int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx,
{
struct hinic_rss_template_key temp_key = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(temp_key);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(temp_key);
int err;
if (!hwdev || !temp)
@@ -844,7 +848,7 @@ int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_engine);
int err;
rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -868,9 +872,9 @@ int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type)
{
struct hinic_rss_engine_type hash_type = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ u16 out_size = sizeof(hash_type);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size = sizeof(hash_type);
int err;
if (!hwdev || !type)
@@ -901,7 +905,7 @@ int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id)
struct hinic_rss_config rss_cfg = { 0 };
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
+ u16 out_size = sizeof(rss_cfg);
int err;
rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -927,8 +931,8 @@ int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -953,8 +957,8 @@ int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx)
struct hinic_rss_template_mgmt template_mgmt = { 0 };
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
+ u16 out_size = sizeof(template_mgmt);
struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
int err;
template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif);
@@ -1043,9 +1047,9 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_version_info up_ver = {0};
+ u16 out_size = sizeof(up_ver);
struct hinic_hwif *hwif;
struct pci_dev *pdev;
- u16 out_size;
int err;
if (!hwdev)
@@ -1068,3 +1072,132 @@ int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver)
return 0;
}
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode)
+{
+ u16 out_size;
+ int err;
+
+ if (!hwdev || !link_mode)
+ return -EINVAL;
+
+ out_size = sizeof(*link_mode);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+ link_mode, sizeof(*link_mode),
+ link_mode, &out_size);
+ if (err || !out_size || link_mode->status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, link_mode->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
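The supported and advertised fields come back as bitmaps indexed by enum hinic_link_mode (declared in hinic_port.h below), with 0xFFFF flagging an invalid report. A hedged decode sketch:

/* hypothetical test of one mode in the returned bitmap */
static bool link_mode_supported(const struct hinic_link_mode_cmd *link_mode,
				enum hinic_link_mode mode)
{
	if (link_mode->supported == HINIC_SUPPORTED_UNKNOWN)
		return false;	/* firmware reported no valid bitmap */

	return !!(link_mode->supported & BIT(mode));
}

/* usage: link_mode_supported(&cmd, HINIC_25GE_BASE_CR) */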
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable)
+{
+ struct hinic_set_autoneg_cmd autoneg = {0};
+ u16 out_size = sizeof(autoneg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ autoneg.enable = enable;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG,
+ &autoneg, sizeof(autoneg),
+ &autoneg, &out_size);
+ if (err || !out_size || autoneg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n",
+ enable ? "enable" : "disable", err, autoneg.status,
+ out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed)
+{
+ struct hinic_speed_cmd speed_info = {0};
+ u16 out_size = sizeof(speed_info);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+ speed_info.speed = speed;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED,
+ &speed_info, sizeof(speed_info),
+ &speed_info, &out_size);
+ if (err || !out_size || speed_info.status) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, speed_info.status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info)
+{
+ u16 out_size = sizeof(*info);
+ int err;
+
+ err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS,
+ info, sizeof(*info), info, &out_size);
+ if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ info->status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, info->status, out_size);
+ return -EFAULT;
+ }
+
+ return info->status;
+}
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info)
+{
+ u16 out_size = sizeof(*pause_info);
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+ pause_info, sizeof(*pause_info),
+ pause_info, &out_size);
+ if (err || !out_size || pause_info->status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, pause_info->status, out_size);
+ return -EIO;
+ }
+
+ return 0;
+}
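The two pause helpers pair naturally as a read-modify-write sequence. A hedged sketch that toggles RX pause while preserving the other fields (the function name is illustrative):

/* hypothetical read-modify-write of the pause configuration */
static int example_set_rx_pause(struct hinic_hwdev *hwdev, bool en)
{
	struct hinic_pause_config pause_info = { 0 };
	int err;

	err = hinic_get_hw_pause_info(hwdev, &pause_info);
	if (err)
		return err;

	pause_info.rx_pause = en ? 1 : 0;

	return hinic_set_hw_pause_info(hwdev, &pause_info);
}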
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 44772fd47fc1..661c6322dc15 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -79,6 +79,42 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_link_mode {
+ HINIC_10GE_BASE_KR = 0,
+ HINIC_40GE_BASE_KR4 = 1,
+ HINIC_40GE_BASE_CR4 = 2,
+ HINIC_100GE_BASE_KR4 = 3,
+ HINIC_100GE_BASE_CR4 = 4,
+ HINIC_25GE_BASE_KR_S = 5,
+ HINIC_25GE_BASE_CR_S = 6,
+ HINIC_25GE_BASE_KR = 7,
+ HINIC_25GE_BASE_CR = 8,
+ HINIC_GE_BASE_KX = 9,
+ HINIC_LINK_MODE_NUMBERS,
+
+ HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
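
The supported/advertised words in struct hinic_link_mode_cmd below are bitmaps indexed by these enum values, with 0xFFFF reserved as the invalid marker. A minimal sketch of decoding the supported word, assuming each enum value is a bit position (the name table is illustrative only, not part of the driver):

	static const char * const link_mode_names[HINIC_LINK_MODE_NUMBERS] = {
		"10GBASE-KR", "40GBASE-KR4", "40GBASE-CR4", "100GBASE-KR4",
		"100GBASE-CR4", "25GBASE-KR-S", "25GBASE-CR-S", "25GBASE-KR",
		"25GBASE-CR", "1000BASE-KX",
	};

	static void dump_supported_modes(u16 supported)
	{
		int i;

		if (supported == HINIC_SUPPORTED_UNKNOWN)	/* 0xFFFF: invalid */
			return;

		for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++)
			if (supported & BIT(i))
				pr_info("supported: %s\n", link_mode_names[i]);
	}
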
+
+enum hinic_port_type {
+ HINIC_PORT_TP, /* BASET */
+ HINIC_PORT_AUI,
+ HINIC_PORT_MII,
+ HINIC_PORT_FIBRE, /* OPTICAL */
+ HINIC_PORT_BNC,
+ HINIC_PORT_ELEC,
+ HINIC_PORT_COPPER, /* PORT_DA */
+ HINIC_PORT_AOC,
+ HINIC_PORT_BACKPLANE,
+ HINIC_PORT_NONE = 0xEF,
+ HINIC_PORT_OTHER = 0xFF,
+};
+
+enum hinic_valid_link_settings {
+ HILINK_LINK_SET_SPEED = 0x1,
+ HILINK_LINK_SET_AUTONEG = 0x2,
+ HILINK_LINK_SET_FEC = 0x4,
+};
+
enum hinic_tso_state {
HINIC_TSO_DISABLE = 0,
HINIC_TSO_ENABLE = 1,
@@ -148,9 +184,9 @@ struct hinic_port_link_status {
u8 version;
u8 rsvd0[6];
- u16 rsvd1;
+ u16 func_id;
u8 link;
- u8 rsvd2;
+ u8 port_id;
};
struct hinic_port_func_state_cmd {
@@ -179,6 +215,50 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_link_mode_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents invalid value */
+ u16 advertised;
+};
+
+struct hinic_speed_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 speed;
+};
+
+struct hinic_set_autoneg_cmd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_link_ksettings_info {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+
+ u32 valid_bitmap;
+ u32 speed; /* enum nic_speed_level */
+ u8 autoneg; /* 0 - off; 1 - on */
+ u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+ u8 rsvd2[18]; /* reserved for duplex, port, etc. */
+};
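
A sketch of how a caller might fill this request for hinic_set_link_settings: valid_bitmap selects which fields the firmware should apply (the HILINK_LINK_SET_* flags above). The values chosen here are purely illustrative, and hwdev is assumed to be in scope:

	struct hinic_link_ksettings_info info = {0};
	int err;

	info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
	info.valid_bitmap = HILINK_LINK_SET_SPEED | HILINK_LINK_SET_AUTONEG;
	info.speed = LINK_SPEED_25GB;	/* enum nic_speed_level */
	info.autoneg = 1;		/* 0 - off; 1 - on */

	err = hinic_set_link_settings(hwdev, &info);
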
+
struct hinic_tso_config {
u8 status;
u8 version;
@@ -506,6 +586,61 @@ struct hinic_cmd_vport_stats {
struct hinic_vport_stats stats;
};
+struct hinic_tx_rate_cfg_max_min {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 min_rate;
+ u32 max_rate;
+ u8 rsvd2[8];
+};
+
+struct hinic_tx_rate_cfg {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 tx_rate;
+};
+
+enum nic_speed_level {
+ LINK_SPEED_10MB = 0,
+ LINK_SPEED_100MB,
+ LINK_SPEED_1GB,
+ LINK_SPEED_10GB,
+ LINK_SPEED_25GB,
+ LINK_SPEED_40GB,
+ LINK_SPEED_100GB,
+ LINK_SPEED_LEVELS,
+};
+
+struct hinic_spoofchk_set {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u8 state;
+ u8 rsvd1;
+ u16 func_id;
+};
+
+struct hinic_pause_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -585,4 +720,24 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
+int hinic_set_link_settings(struct hinic_hwdev *hwdev,
+ struct hinic_link_ksettings_info *info);
+
+int hinic_get_link_mode(struct hinic_hwdev *hwdev,
+ struct hinic_link_mode_cmd *link_mode);
+
+int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable);
+
+int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed);
+
+int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
+ struct hinic_pause_config *pause_info);
+
+int hinic_open(struct net_device *netdev);
+
+int hinic_close(struct net_device *netdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 815649e37cb1..af20d0dd6de7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -432,9 +432,11 @@ static int rx_poll(struct napi_struct *napi, int budget)
return budget;
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_ENABLE);
+
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_ENABLE);
return pkts;
}
@@ -461,9 +463,10 @@ static irqreturn_t rx_irq(int irq, void *data)
/* Disable the interrupt until napi will be completed */
nic_dev = netdev_priv(rxq->netdev);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- rq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ rq->msix_entry,
+ HINIC_MSIX_DISABLE);
nic_dev = netdev_priv(rxq->netdev);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
new file mode 100644
index 000000000000..efab2dd2c889
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic_hw_dev.h"
+#include "hinic_dev.h"
+#include "hinic_hw_mbox.h"
+#include "hinic_hw_cmdq.h"
+#include "hinic_port.h"
+#include "hinic_sriov.h"
+
+static unsigned char set_vf_link_state;
+module_param(set_vf_link_state, byte, 0444);
+MODULE_PARM_DESC(set_vf_link_state, "Set VF link state: 0 - link auto, 1 - link always up, 2 - link always down. Default is 0.");
+
+#define HINIC_VLAN_PRIORITY_SHIFT 13
+#define HINIC_ADD_VLAN_IN_MAC 0x8000
+#define HINIC_TX_RATE_TABLE_FULL 12
+
+static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
+ u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_cmd mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ mac_info.func_idx = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.mac, mac_addr, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+ if (err || out_size != sizeof(mac_info) ||
+ (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY &&
+ mac_info.status != HINIC_MGMT_STATUS_EXIST)) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, status = %d\n",
+ mac_info.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,
+ u8 link_status)
+{
+ struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos;
+ struct hinic_port_link_status link = {0};
+ u16 out_size = sizeof(link);
+ int err;
+
+ if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {
+ link.link = link_status;
+ link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC,
+ vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,
+ &link, sizeof(link),
+ &link, &out_size, 0);
+ if (err || !out_size || link.status)
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err,
+ link.status, out_size);
+ }
+}
+
+/* send a link change event mbox msg to all active VFs under the PF */
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 i;
+
+ nic_io->link_status = link_status;
+ for (i = 1; i <= nic_io->max_vfs; i++) {
+ if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced)
+ hinic_notify_vf_link_status(hwdev, i, link_status);
+ }
+}
+
+static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ u16 pf_vlan, vlanprio;
+ u8 pf_qos;
+
+ pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+ pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+ vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
+
+ return vlanprio;
+}
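
The returned vlanprio packs the 12-bit VLAN ID in the low bits and the 3-bit QoS priority starting at bit 13, mirroring the 802.1Q TCI layout. A sketch of the round trip (VLAN_VID_MASK comes from <linux/if_vlan.h>, already included above):

	u16 vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT;
	u16 vlan = vlanprio & VLAN_VID_MASK;			/* bits 0-11 */
	u8 qos = (u8)(vlanprio >> HINIC_VLAN_PRIORITY_SHIFT);	/* bits 13-15 */
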
+
+static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid,
+ u8 qos, int vf_id)
+{
+ struct hinic_vf_vlan_config vf_vlan = {0};
+ u16 out_size = sizeof(vf_vlan);
+ int err;
+ u8 cmd;
+
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ vf_vlan.vlan_id = vid;
+ vf_vlan.qos = qos;
+
+ if (add)
+ cmd = HINIC_PORT_CMD_SET_VF_VLAN;
+ else
+ cmd = HINIC_PORT_CMD_CLR_VF_VLAN;
+
+ err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan,
+ sizeof(vf_vlan), &vf_vlan, &out_size);
+ if (err || !out_size || vf_vlan.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg_max_min rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.max_rate = max_rate;
+ rate_cfg.min_rate = min_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+ rate_cfg.status) || err || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err,
+ rate_cfg.status, out_size);
+ return -EIO;
+ }
+
+ if (!rate_cfg.status) {
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate;
+ }
+
+ return rate_cfg.status;
+}
+
+static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 tx_rate)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct hinic_tx_rate_cfg rate_cfg = {0};
+ u16 out_size = sizeof(rate_cfg);
+ int err;
+
+ rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ rate_cfg.tx_rate = tx_rate;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE,
+ &rate_cfg, sizeof(rate_cfg), &rate_cfg,
+ &out_size);
+ if (err || !out_size || rate_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status,
+ out_size);
+ if (rate_cfg.status)
+ return rate_cfg.status;
+
+ return -EIO;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0;
+
+ return 0;
+}
+
+static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id,
+ u32 max_rate, u32 min_rate)
+{
+ int err;
+
+ err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate);
+ if (err != HINIC_MGMT_CMD_UNSUPPORTED)
+ return err;
+
+ if (min_rate) {
+ dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n");
+
+ return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate);
+}
+
+static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 func_id, vlan_id;
+ int err = 0;
+
+ vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac) {
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+
+ vlan_id = 0;
+
+ err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id,
+ func_id);
+ if (err) {
+ dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (hinic_vf_info_vlanprio(hwdev, vf_id)) {
+ err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan,
+ vf_info->pf_qos, vf_id);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n",
+ HW_VF_ID_TO_OS(vf_id));
+ return err;
+ }
+ }
+
+ if (vf_info->max_rate) {
+ err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate,
+ vf_info->min_rate);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->max_rate,
+ vf_info->min_rate);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_register_vf *register_info = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ if (vf_id > nic_io->max_vfs) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n",
+ HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs));
+ register_info->status = EFAULT;
+ return -EFAULT;
+ }
+
+ *out_size = sizeof(*register_info);
+ err = hinic_init_vf_config(hw_dev, vf_id);
+ if (err) {
+ register_info->status = EFAULT;
+ return err;
+ }
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true;
+
+ return 0;
+}
+
+static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+
+ nic_io = &hw_dev->func_to_io;
+ *out_size = 0;
+ if (vf_id > nic_io->max_vfs)
+ return 0;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false;
+
+ return 0;
+}
+
+static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_hwdev *hw_dev = hwdev;
+ int err;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in,
+ in_size, buf_out, out_size);
+ if (err) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n",
+ vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_info = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+
+ nic_io = &dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN);
+ mac_info->status = 0;
+ *out_size = sizeof(*mac_info);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && !(vf_info->trust) &&
+ is_valid_ether_addr(mac_in->mac)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n",
+ HW_VF_ID_TO_OS(vf_id));
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev,
+ "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_mac_cmd *mac_out = buf_out;
+ struct hinic_port_mac_cmd *mac_in = buf_in;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct vf_data_storage *vf_info;
+ int err;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
+ !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
+ dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
+ mac_out->status = HINIC_PF_SET_VF_ALREADY;
+ *out_size = sizeof(*mac_out);
+ return 0;
+ }
+
+ err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in,
+ in_size, buf_out, out_size);
+ if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) {
+ dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size);
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id,
+ void *buf_in, u16 in_size,
+ void *buf_out, u16 *out_size)
+{
+ struct hinic_port_link_cmd *get_link = buf_out;
+ struct hinic_hwdev *hw_dev = hwdev;
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+ bool link_forced, link_up;
+
+ nic_io = &hw_dev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced;
+ link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up;
+
+ if (link_forced)
+ get_link->state = link_up ?
+ HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN;
+ else
+ get_link->state = nic_io->link_status;
+
+ get_link->status = 0;
+ *out_size = sizeof(*get_link);
+
+ return 0;
+}
+
+static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = {
+ {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler},
+ {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler},
+ {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler},
+ {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler},
+ {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler},
+};
+
+#define CHECK_IPSU_15BIT 0x8000
+
+static
+struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+ return &nic_dev->sriov_info;
+}
+
+static int hinic_check_mac_info(u8 status, u16 vlan_id)
+{
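+ /* any unexpected status is an error; a MAC-exists status is fatal
+ * only when bit 15 (CHECK_IPSU_15BIT) of the returned vlan_id is set
+ */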
+ if ((status && status != HINIC_MGMT_STATUS_EXIST &&
+ status != HINIC_PF_SET_VF_ALREADY) ||
+ (vlan_id & CHECK_IPSU_15BIT &&
+ status == HINIC_MGMT_STATUS_EXIST))
+ return -EINVAL;
+
+ return 0;
+}
+
+#define HINIC_VLAN_ID_MASK 0x7FFF
+
+static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct hinic_port_mac_update mac_info = {0};
+ u16 out_size = sizeof(mac_info);
+ int err;
+
+ if (!hwdev || !old_mac || !new_mac)
+ return -EINVAL;
+
+ if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ memcpy(mac_info.old_mac, old_mac, ETH_ALEN);
+ memcpy(mac_info.new_mac, new_mac, ETH_ALEN);
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info,
+ sizeof(mac_info), &mac_info, &out_size);
+
+ if (err || !out_size ||
+ hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, mac_info.status, out_size);
+ return -EINVAL;
+ }
+
+ if (mac_info.status == HINIC_PF_SET_VF_ALREADY) {
+ dev_warn(&hwdev->hwif->pdev->dev,
+ "PF has already set VF MAC. Ignore update operation\n");
+ return HINIC_PF_SET_VF_ALREADY;
+ }
+
+ if (mac_info.status == HINIC_MGMT_STATUS_EXIST)
+ dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. Ignore update operation\n");
+
+ return 0;
+}
+
+static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct vf_data_storage *vfinfo;
+
+ vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+
+ ivi->vf = HW_VF_ID_TO_OS(vf_id);
+ memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN);
+ ivi->vlan = vfinfo->pf_vlan;
+ ivi->qos = vfinfo->pf_qos;
+ ivi->spoofchk = vfinfo->spoofchk;
+ ivi->trusted = vfinfo->trust;
+ ivi->max_tx_rate = vfinfo->max_rate;
+ ivi->min_tx_rate = vfinfo->min_rate;
+
+ if (!vfinfo->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vfinfo->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+}
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);
+
+ return 0;
+}
+
+static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf,
+ unsigned char *mac_addr)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_info;
+ u16 func_id;
+ int err;
+
+ vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf);
+
+ /* duplicate request, so just return success */
+ if (vf_info->pf_set_mac &&
+ !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN))
+ return 0;
+
+ vf_info->pf_set_mac = true;
+
+ func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf;
+ err = hinic_update_mac(hwdev, vf_info->vf_mac_addr,
+ mac_addr, 0, func_id);
+ if (err) {
+ vf_info->pf_set_mac = false;
+ return err;
+ }
+
+ memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);
+ if (err)
+ return err;
+
+ netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf);
+ netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective.");
+
+ return 0;
+}
+
+static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id,
+ u16 vlan, u8 qos)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id);
+ if (err)
+ return err;
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan, qos, HW_VF_ID_TO_OS(vf_id));
+ return 0;
+}
+
+static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ int err;
+
+ err = hinic_set_vf_vlan(hwdev, false,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos,
+ vf_id);
+ if (err)
+ return err;
+
+ dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n",
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+ HW_VF_ID_TO_OS(vf_id));
+
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+ nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+ return 0;
+}
+
+static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan,
+ u16 new_vlan, int vf_id)
+{
+ struct vf_data_storage *vf_info;
+ u16 vlan_id;
+ int err;
+
+ if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID)
+ return -EINVAL;
+
+ vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (!vf_info->pf_set_mac)
+ return 0;
+
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan);
+ return err;
+ }
+
+ vlan_id = new_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+
+ err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+ if (err) {
+ dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n",
+ HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ vlan_id = old_vlan;
+ if (vlan_id)
+ vlan_id |= HINIC_ADD_VLAN_IN_MAC;
+ hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id);
+
+ return err;
+}
+
+static int set_hw_vf_vlan(struct hinic_dev *nic_dev,
+ u16 cur_vlanprio, int vf, u16 vlan, u8 qos)
+{
+ u16 old_vlan = cur_vlanprio & VLAN_VID_MASK;
+ int err = 0;
+
+ if (vlan || qos) {
+ if (cur_vlanprio) {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+ err = hinic_add_vf_vlan(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf), vlan, qos);
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n",
+ vf, vlan);
+ goto out;
+ }
+ } else {
+ err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf));
+ if (err) {
+ dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n",
+ vf, old_vlan);
+ goto out;
+ }
+ }
+
+ err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan,
+ OS_VF_ID_TO_HW(vf));
+
+out:
+ return err;
+}
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ u16 vlanprio, cur_vlanprio;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+ vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT;
+ cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev,
+ OS_VF_ID_TO_HW(vf));
+ /* duplicate request, so just return success */
+ if (vlanprio == cur_vlanprio)
+ return 0;
+
+ return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos);
+}
+
+static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool trust)
+{
+ struct vf_data_storage *vf_infos;
+ struct hinic_func_to_io *nic_io;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ nic_io = &hwdev->func_to_io;
+ vf_infos = nic_io->vf_infos;
+ vf_infos[vf_id].trust = trust;
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *adapter = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ struct hinic_func_to_io *nic_io;
+ bool cur_trust;
+ int err;
+
+ sriov_info = &adapter->sriov_info;
+ nic_io = &adapter->hwdev->func_to_io;
+
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_trust = nic_io->vf_infos[vf].trust;
+ /* same request, so just return success */
+ if ((setting && cur_trust) || (!setting && !cur_trust))
+ return 0;
+
+ err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
+ if (!err)
+ dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n",
+ vf, setting ? "on" : "off");
+ else
+ dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n",
+ vf, setting ? "on" : "off");
+
+ return err;
+}
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate)
+{
+ u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000,
+ SPEED_25000, SPEED_40000, SPEED_100000};
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_port_cap port_cap = { 0 };
+ enum hinic_port_link_state link_state;
+ int err;
+
+ if (vf >= nic_dev->sriov_info.num_vfs) {
+ netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n",
+ nic_dev->sriov_info.num_vfs);
+ return -EINVAL;
+ }
+
+ if (max_tx_rate < min_tx_rate) {
+ netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
+ max_tx_rate, min_tx_rate);
+ return -EINVAL;
+ }
+
+ err = hinic_port_link_state(nic_dev, &link_state);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Get link status failed when setting vf tx rate\n");
+ return -EIO;
+ }
+
+ if (link_state == HINIC_LINK_STATE_DOWN) {
+ netif_err(nic_dev, drv, netdev,
+ "Link status must be up when setting vf tx rate\n");
+ return -EPERM;
+ }
+
+ err = hinic_port_get_cap(nic_dev, &port_cap);
+ if (err || port_cap.speed > LINK_SPEED_100GB)
+ return -EIO;
+
+ /* rate limit cannot be less than 0 or greater than the link speed */
+ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) {
+ netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n",
+ speeds[port_cap.speed]);
+ return -EINVAL;
+ }
+
+ err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf),
+ max_tx_rate, min_tx_rate);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Unable to set VF %d max rate %d min rate %d%s\n",
+ vf, max_tx_rate, min_tx_rate,
+ err == HINIC_TX_RATE_TABLE_FULL ?
+ ", tx rate profile is full" : "");
+ return -EIO;
+ }
+
+ netif_info(nic_dev, drv, netdev,
+ "Set VF %d max tx rate %d min tx rate %d successfully\n",
+ vf, max_tx_rate, min_tx_rate);
+
+ return 0;
+}
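
In practice this callback is reached from userspace through iproute2, e.g. "ip link set <pf-netdev> vf 0 max_tx_rate 1000 min_tx_rate 100" (rates in Mb/s), which the kernel routes to the driver's ndo_set_vf_rate hook.
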
+
+static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id,
+ bool spoofchk)
+{
+ struct hinic_spoofchk_set spoofchk_cfg = {0};
+ struct vf_data_storage *vf_infos = NULL;
+ u16 out_size = sizeof(spoofchk_cfg);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vf_infos = hwdev->func_to_io.vf_infos;
+
+ spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;
+ spoofchk_cfg.state = spoofchk ? 1 : 0;
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK,
+ &spoofchk_cfg, sizeof(spoofchk_cfg),
+ &spoofchk_cfg, &out_size);
+ if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || spoofchk_cfg.status) {
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n",
+ HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status,
+ out_size);
+ err = -EIO;
+ }
+
+ if (!err)
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk;
+
+ return err;
+}
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+ bool cur_spoofchk;
+ int err;
+
+ sriov_info = &nic_dev->sriov_info;
+ if (vf >= sriov_info->num_vfs)
+ return -EINVAL;
+
+ cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
+
+ /* same request, so just return success */
+ if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ return 0;
+
+ err = hinic_set_vf_spoofchk(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf), setting);
+
+ if (!err) {
+ netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
+ vf, setting ? "on" : "off");
+ } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
+ netif_err(nic_dev, drv, netdev,
+ "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n");
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id,
+ int link)
+{
+ struct hinic_func_to_io *nic_io = &hwdev->func_to_io;
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+ u8 link_status = 0;
+
+ switch (link) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ?
+ true : false;
+ link_status = nic_io->link_status;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true;
+ link_status = HINIC_LINK_UP;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true;
+ vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false;
+ link_status = HINIC_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Notify the VF of its new link state */
+ hinic_notify_vf_link_status(hwdev, vf_id, link_status);
+
+ return 0;
+}
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = &nic_dev->sriov_info;
+
+ if (vf_id >= sriov_info->num_vfs) {
+ netif_err(nic_dev, drv, netdev,
+ "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ return hinic_set_vf_link_state(sriov_info->hwdev,
+ OS_VF_ID_TO_HW(vf_id), link);
+}
+
+/* handle a message received by the PF from a VF */
+static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct vf_cmd_msg_handle *vf_msg_handle;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_func_to_io *nic_io;
+ struct hinic_pfhwdev *pfhwdev;
+ int err = 0;
+ u32 i;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev);
+ nic_io = &dev->func_to_io;
+ for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) {
+ vf_msg_handle = &nic_vf_cmd_msg_handler[i];
+ if (cmd == vf_msg_handle->cmd &&
+ vf_msg_handle->cmd_msg_handler) {
+ err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id,
+ buf_in, in_size,
+ buf_out,
+ out_size);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler))
+ err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
+ cmd, buf_in, in_size, buf_out,
+ out_size, HINIC_MGMT_MSG_SYNC);
+
+ if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
+ dev_err(&nic_io->hwif->pdev->dev, "PF receive VF L2NIC cmd: %d process error, err:%d\n",
+ cmd, err);
+ return err;
+}
+
+static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size)
+{
+ struct hinic_dev_cap *dev_cap = buf_out;
+ struct hinic_hwdev *dev = hwdev;
+ struct hinic_cap *cap;
+
+ cap = &dev->nic_cap;
+ memset(dev_cap, 0, sizeof(*dev_cap));
+
+ dev_cap->max_vf = cap->max_vf;
+ dev_cap->max_sqs = cap->max_vf_qps;
+ dev_cap->max_rqs = cap->max_vf_qps;
+
+ *out_size = sizeof(*dev_cap);
+
+ return 0;
+}
+
+static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos = nic_io->vf_infos;
+
+ if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) {
+ dev_warn(&nic_io->hwif->pdev->dev, "Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n",
+ set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO);
+ set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO;
+ }
+
+ switch (set_vf_link_state) {
+ case HINIC_IFLA_VF_LINK_STATE_AUTO:
+ vf_infos[vf_id].link_forced = false;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_ENABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = true;
+ break;
+ case HINIC_IFLA_VF_LINK_STATE_DISABLE:
+ vf_infos[vf_id].link_forced = true;
+ vf_infos[vf_id].link_up = false;
+ break;
+ default:
+ dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n",
+ set_vf_link_state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
+{
+ struct vf_data_storage *vf_infos;
+ u16 func_id;
+
+ func_id = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + vf_id;
+ vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
+ if (vf_infos->pf_set_mac)
+ hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
+
+ if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id))
+ hinic_kill_vf_vlan(nic_dev->hwdev, vf_id);
+
+ if (vf_infos->max_rate)
+ hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0);
+
+ if (vf_infos->spoofchk)
+ hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false);
+
+ if (vf_infos->trust)
+ hinic_set_vf_trust(nic_dev->hwdev, HW_VF_ID_TO_OS(vf_id), false);
+
+ memset(vf_infos, 0, sizeof(*vf_infos));
+ /* set vf_infos to default */
+ hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
+}
+
+static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
+{
+ struct hinic_dev *nic_dev;
+ u16 func_idx, idx;
+
+ nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info);
+
+ for (idx = start_vf_id; idx <= end_vf_id; idx++) {
+ func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx;
+ hinic_set_wq_page_size(nic_dev->hwdev, func_idx,
+ HINIC_HW_WQ_PAGE_SIZE);
+ hinic_clear_vf_infos(nic_dev, idx);
+ }
+
+ return 0;
+}
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf register_info = {0};
+ u16 out_size = sizeof(register_info);
+ struct hinic_func_to_io *nic_io;
+ int err = 0;
+ u32 size, i;
+
+ nic_io = &hwdev->func_to_io;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_REGISTER,
+ &register_info, sizeof(register_info),
+ &register_info, &out_size, 0);
+ if (err || register_info.status || !out_size) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, register_info.status, out_size);
+ hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ return -EIO;
+ }
+ } else {
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM,
+ cfg_mbx_pf_proc_vf_msg);
+ if (err) {
+ dev_err(&hwdev->hwif->pdev->dev,
+ "Register PF mailbox callback failed\n");
+ return err;
+ }
+ nic_io->max_vfs = hwdev->nic_cap.max_vf;
+ size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs;
+ if (size != 0) {
+ nic_io->vf_infos = kzalloc(size, GFP_KERNEL);
+ if (!nic_io->vf_infos) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < nic_io->max_vfs; i++) {
+ err = hinic_init_vf_infos(nic_io, i);
+ if (err)
+ goto err_init_vf_infos;
+ }
+
+ err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
+ nic_pf_mbox_handler);
+ if (err)
+ goto err_register_pf_mbox_cb;
+ }
+ }
+
+ return 0;
+
+err_register_pf_mbox_cb:
+err_init_vf_infos:
+ kfree(nic_io->vf_infos);
+err_out:
+ return err;
+}
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_register_vf unregister = {0};
+ u16 out_size = sizeof(unregister);
+ int err;
+
+ if (HINIC_IS_VF(hwdev->hwif)) {
+ err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC,
+ HINIC_PORT_CMD_VF_UNREGISTER,
+ &unregister, sizeof(unregister),
+ &unregister, &out_size, 0);
+ if (err || !out_size || unregister.status)
+ dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n",
+ err, unregister.status, out_size);
+ } else {
+ if (hwdev->func_to_io.vf_infos) {
+ hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
+ kfree(hwdev->func_to_io.vf_infos);
+ }
+ }
+}
+
+static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id,
+ u16 end_vf_id)
+{
+ u16 i, func_idx;
+ int err;
+
+ /* VFs use 256K as the default WQ page size and cannot change it */
+ for (i = start_vf_id; i <= end_vf_id; i++) {
+ func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i;
+ err = hinic_set_wq_page_size(hwdev, func_idx,
+ HINIC_DEFAULT_WQ_PAGE_SIZE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct hinic_sriov_info *sriov_info;
+ u16 tmp_vfs;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+ /* if SR-IOV is already disabled then nothing will be done */
+ if (!sriov_info->sriov_enabled)
+ return 0;
+
+ set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(sriov_info->pdev)) {
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+ dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ }
+ sriov_info->sriov_enabled = false;
+
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(sriov_info->pdev);
+
+ tmp_vfs = (u16)sriov_info->num_vfs;
+ sriov_info->num_vfs = 0;
+ hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW(tmp_vfs - 1));
+
+ clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state);
+
+ return 0;
+}
+
+int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+ int err;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(pdev);
+
+ if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) {
+ dev_err(&pdev->dev,
+ "SR-IOV enable in process, please wait, num_vfs %d\n",
+ num_vfs);
+ return -EPERM;
+ }
+
+ err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0),
+ OS_VF_ID_TO_HW((u16)num_vfs - 1));
+ if (err) {
+ dev_err(&sriov_info->pdev->dev,
+ "Failed to init vf in hardware before enable sriov, error %d\n",
+ err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ err = pci_enable_sriov(sriov_info->pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to enable SR-IOV, error %d\n", err);
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+ return err;
+ }
+
+ sriov_info->sriov_enabled = true;
+ sriov_info->num_vfs = num_vfs;
+ clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state);
+
+ return num_vfs;
+}
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct hinic_sriov_info *sriov_info;
+
+ sriov_info = hinic_get_sriov_info_by_pcidev(dev);
+
+ if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state))
+ return -EBUSY;
+
+ if (!num_vfs)
+ return hinic_pci_sriov_disable(dev);
+ else
+ return hinic_pci_sriov_enable(dev, num_vfs);
+}
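
hinic_pci_sriov_configure is the driver's PCI sriov_configure callback, so writing N to the device's sysfs sriov_numvfs file arrives here with num_vfs == N (0 disables SR-IOV). A sketch of the wiring, with the id-table, probe and remove names assumed from hinic_main.c:

	static struct pci_driver hinic_driver = {
		.name		 = "hinic",
		.id_table	 = hinic_pci_table,
		.probe		 = hinic_probe,
		.remove		 = hinic_remove,
		.sriov_configure = hinic_pci_sriov_configure,
	};
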
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
new file mode 100644
index 000000000000..ba627a362f9a
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_SRIOV_H
+#define HINIC_SRIOV_H
+
+#include "hinic_hw_dev.h"
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
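
Hardware VF IDs are one-based (the PF iterates VFs from 1 in hinic_notify_all_vfs_link_changed), while vf_infos[] and the ndo callbacks use zero-based indices, so the two macros are inverse off-by-one conversions:

	OS_VF_ID_TO_HW(0);	/* == 1: first VF as addressed by hardware */
	HW_VF_ID_TO_OS(1);	/* == 0: first VF as indexed into vf_infos[] */
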
+
+enum hinic_sriov_state {
+ HINIC_SRIOV_DISABLE,
+ HINIC_SRIOV_ENABLE,
+ HINIC_FUNC_REMOVE,
+};
+
+enum {
+ HINIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
+ HINIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */
+ HINIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */
+};
+
+struct hinic_sriov_info {
+ struct pci_dev *pdev;
+ struct hinic_hwdev *hwdev;
+ bool sriov_enabled;
+ unsigned int num_vfs;
+ unsigned long state;
+};
+
+struct vf_data_storage {
+ u8 vf_mac_addr[ETH_ALEN];
+ bool registered;
+ bool pf_set_mac;
+ u16 pf_vlan;
+ u8 pf_qos;
+ u32 max_rate;
+ u32 min_rate;
+
+ bool link_forced;
+ bool link_up; /* only valid if VF link is forced */
+ bool spoofchk;
+ bool trust;
+};
+
+struct hinic_register_vf {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct hinic_port_mac_update {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vf_vlan_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 vlan_id;
+ u8 qos;
+ u8 rsvd1[7];
+};
+
+int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+
+int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
+
+int hinic_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+
+int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_bw(struct net_device *netdev,
+ int vf, int min_tx_rate, int max_tx_rate);
+
+int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+
+int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
+void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
+ u8 link_status);
+
+int hinic_pci_sriov_disable(struct pci_dev *dev);
+
+int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
+
+int hinic_vf_func_init(struct hinic_hwdev *hwdev);
+
+void hinic_vf_func_free(struct hinic_hwdev *hwdev);
+
+int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 365016450bdb..4c66a0bc1b28 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -673,9 +673,11 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
if (pkts < budget) {
napi_complete(napi);
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- sq->msix_entry,
- HINIC_MSIX_ENABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ sq->msix_entry,
+ HINIC_MSIX_ENABLE);
+
return pkts;
}
@@ -701,10 +703,11 @@ static irqreturn_t tx_irq(int irq, void *data)
nic_dev = netdev_priv(txq->netdev);
- /* Disable the interrupt until napi will be completed */
- hinic_hwdev_set_msix_state(nic_dev->hwdev,
- txq->sq->msix_entry,
- HINIC_MSIX_DISABLE);
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ /* Disable the interrupt until napi will be completed */
+ hinic_hwdev_set_msix_state(nic_dev->hwdev,
+ txq->sq->msix_entry,
+ HINIC_MSIX_DISABLE);
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 48428d6a00be..623e516a9630 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3960,7 +3960,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
* @hw: Struct containing variables accessed by shared code
*
* Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
*/
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 0d51cbc88028..d9fa4600f745 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
- e1000_down(adapter);
- e1000_up(adapter);
+
+ /* only run the task if not already down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ }
+
clear_bit(__E1000_RESETTING, &adapter->flags);
}
@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
int count = E1000_CHECK_RESET_COUNT;
- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
usleep_range(10000, 20000);
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ WARN_ON(count < 0);
+
+ /* signal that we're down so that the reset task will no longer run */
+ set_bit(__E1000_DOWN, &adapter->flags);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
e1000_down(adapter);
e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
@@ -3136,8 +3146,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3158,6 +3169,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 37a2314d3e6b..944abd5eae11 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 735bf25952fc..f999cca37a8a 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -300,7 +300,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
* so forcibly disable it.
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
- e1000_disable_ulp_lpt_lp(hw, true);
+ ret_val = e1000_disable_ulp_lpt_lp(hw, true);
+ if (ret_val) {
+ e_warn("Failed to disable ULP\n");
+ goto out;
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2730b1c7dddb..a279f4fa9962 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -107,6 +107,45 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
+struct e1000e_me_supported {
+ u16 device_id; /* supported device ID */
+};
+
+static const struct e1000e_me_supported me_supported[] = {
+ {E1000_DEV_ID_PCH_LPT_I217_LM},
+ {E1000_DEV_ID_PCH_LPTLP_I218_LM},
+ {E1000_DEV_ID_PCH_I218_LM2},
+ {E1000_DEV_ID_PCH_I218_LM3},
+ {E1000_DEV_ID_PCH_SPT_I219_LM},
+ {E1000_DEV_ID_PCH_SPT_I219_LM2},
+ {E1000_DEV_ID_PCH_LBG_I219_LM3},
+ {E1000_DEV_ID_PCH_SPT_I219_LM4},
+ {E1000_DEV_ID_PCH_SPT_I219_LM5},
+ {E1000_DEV_ID_PCH_CNP_I219_LM6},
+ {E1000_DEV_ID_PCH_CNP_I219_LM7},
+ {E1000_DEV_ID_PCH_ICP_I219_LM8},
+ {E1000_DEV_ID_PCH_ICP_I219_LM9},
+ {E1000_DEV_ID_PCH_CMP_I219_LM10},
+ {E1000_DEV_ID_PCH_CMP_I219_LM11},
+ {E1000_DEV_ID_PCH_CMP_I219_LM12},
+ {E1000_DEV_ID_PCH_TGP_I219_LM13},
+ {E1000_DEV_ID_PCH_TGP_I219_LM14},
+ {E1000_DEV_ID_PCH_TGP_I219_LM15},
+ {0}
+};
+
+static bool e1000e_check_me(u16 device_id)
+{
+ const struct e1000e_me_supported *id;
+
+ for (id = me_supported; id->device_id; id++)
+ if (device_id == id->device_id)
+ return true;
+
+ return false;
+}
+
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -119,14 +158,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
* has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
* and try again a number of times.
**/
-s32 __ew32_prepare(struct e1000_hw *hw)
+static void __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
-
- return i;
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
@@ -607,11 +644,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, rx_ring->tail);
- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ if (unlikely(i != readl(rx_ring->tail))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -624,11 +661,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, tx_ring->tail);
- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ if (unlikely(i != readl(tx_ring->tail))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
@@ -5294,6 +5331,10 @@ static void e1000_watchdog_task(struct work_struct *work)
/* oops */
break;
}
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
}
/* enable transmits in the hardware, need to do this
@@ -6404,6 +6445,31 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= BIT(3);
ew32(CTRL_EXT, mac_data);
+ /* Disable disconnected cable conditioning for Power Gating */
+ mac_data = er32(DPGFR);
+ mac_data |= BIT(2);
+ ew32(DPGFR, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
@@ -6433,6 +6499,35 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Re-gate the PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Re-enable waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Re-enable disconnected cable conditioning
+ * for Power Gating
+ */
+ mac_data = er32(DPGFR);
+ mac_data &= ~BIT(2);
+ ew32(DPGFR, mac_data);
+
/* Disable Dynamic Power Gating */
mac_data = er32(CTRL_EXT);
mac_data &= 0xFFFFFFF7;
@@ -6858,7 +6953,8 @@ static int e1000e_pm_suspend(struct device *dev)
e1000e_pm_thaw(dev);
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp)
+ if (hw->mac.type >= e1000_pch_cnp &&
+ !e1000e_check_me(hw->adapter->pdev->device))
e1000e_s0ix_entry_flow(adapter);
return rc;
@@ -6873,7 +6969,8 @@ static int e1000e_pm_resume(struct device *dev)
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp)
+ if (hw->mac.type >= e1000_pch_cnp &&
+ !e1000e_check_me(hw->adapter->pdev->device))
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index df59fd1d660c..8165ba2619a4 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -21,9 +21,12 @@
#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM8 0x5BB0 /* Future Extended NVM 8 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM12 0x5BC0 /* Future Extended NVM 12 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
+#define E1000_DPGFR 0x00FAC /* Dynamic Power Gate Force Control Register */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 37514a75f928..6a089848c857 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -694,10 +694,8 @@ init_adminq_exit:
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+void i40e_shutdown_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
-
if (i40e_check_asq_alive(hw))
i40e_aq_queue_shutdown(hw, true);
@@ -706,8 +704,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
-
- return ret_code;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2a037ec244b9..5d807c8004f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11,7 +11,7 @@
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
@@ -3260,26 +3260,31 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ kfree(ring->rx_bi);
ring->xsk_umem = i40e_xsk_umem(ring);
if (ring->xsk_umem) {
- ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ ret = i40e_alloc_rx_bi_zc(ring);
+ if (ret)
+ return ret;
+ ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
*/
chain_len = 1;
- ring->zca.free = i40e_zca_free;
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca);
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
if (ret)
return ret;
dev_info(&vsi->back->pdev->dev,
- "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
+ ret = i40e_alloc_rx_bi(ring);
+ if (ret)
+ return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -3344,9 +3349,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- ok = ring->xsk_umem ?
- i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
- !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (ring->xsk_umem) {
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ }
if (!ok) {
/* Log this in case the user has forgotten to give the kernel
* any buffers, even later in the application.
@@ -14478,29 +14486,29 @@ static void i40e_print_features(struct i40e_pf *pf)
i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
- i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
+ i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
+ i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " RSS");
+ i += scnprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
+ i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
- i += snprintf(&buf[i], REMAIN(i), " FD_SB");
- i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
+ i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
+ i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
- i += snprintf(&buf[i], REMAIN(i), " DCB");
- i += snprintf(&buf[i], REMAIN(i), " VxLAN");
- i += snprintf(&buf[i], REMAIN(i), " Geneve");
+ i += scnprintf(&buf[i], REMAIN(i), " DCB");
+ i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
+ i += scnprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP)
- i += snprintf(&buf[i], REMAIN(i), " PTP");
+ i += scnprintf(&buf[i], REMAIN(i), " PTP");
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
- i += snprintf(&buf[i], REMAIN(i), " VEB");
+ i += scnprintf(&buf[i], REMAIN(i), " VEB");
else
- i += snprintf(&buf[i], REMAIN(i), " VEPA");
+ i += scnprintf(&buf[i], REMAIN(i), " VEPA");
dev_info(&pf->pdev->dev, "%s\n", buf);
kfree(buf);
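The snprintf()-to-scnprintf() conversion above is more than style: snprintf() returns the length the output would have taken, so once the buffer is full the accumulated offset keeps growing past it, while scnprintf() returns the bytes actually stored. A minimal illustration (the demo function name is ours, not from the driver):

static void snprintf_vs_scnprintf_demo(void)
{
	char buf[8];
	int i;

	/* snprintf() returns 10 here even though only 7 chars plus the
	 * NUL fit, so chaining &buf[i] off this value would overrun buf.
	 */
	i = snprintf(buf, sizeof(buf), "0123456789");	/* i == 10 */

	/* scnprintf() returns the characters actually stored (excluding
	 * the NUL), so the running offset stays within the buffer.
	 */
	i = scnprintf(buf, sizeof(buf), "0123456789");	/* i == 7 */
}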
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index bbb478f09093..5c1378641b3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -17,7 +17,7 @@
/* adminq functions */
i40e_status i40e_init_adminq(struct i40e_hw *hw);
-i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b8496037ef7f..f9555c847f73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -521,28 +521,29 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
- * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
+ * @qword0_raw: raw (unconverted) qword0 of the Rx descriptor
+ * @qword1: qword1 after le_to_cpu
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-void i40e_fd_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1, u8 prog_id)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
+ struct i40e_32b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- u64 qw;
- qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
- pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
- if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
+ pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
+ if (qw0->hi_dword.fd_id != 0 ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
pf->fd_inv);
@@ -560,7 +561,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* store the current atr filter count */
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
- if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+ if (qw0->hi_dword.fd_id == 0 &&
test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
/* These set_bit() calls aren't atomic with the
* test_bit() here, but worse case we potentially
@@ -589,7 +590,7 @@ void i40e_fd_handle_status(struct i40e_ring *rx_ring,
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
- rx_desc->wb.qword0.hi_dword.fd_id);
+ qw0->hi_dword.fd_id);
}
}
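The rewritten handler above receives qword0 by value and overlays the new i40e_32b_rx_wb_qw0 layout on the local copy, so the DMA-visible descriptor is read exactly once. A minimal sketch of that reinterpretation pattern (function name hypothetical):

static u32 example_extract_fd_id(u64 qword0_raw)
{
	struct i40e_32b_rx_wb_qw0 *qw0;

	/* Overlay the structured writeback view on the local copy of
	 * the raw little-endian qword; no second descriptor read.
	 */
	qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;

	return le32_to_cpu(qw0->hi_dword.fd_id);
}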
@@ -1195,6 +1196,11 @@ clear_counts:
rc->total_packets = 0;
}
+static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi[idx];
+}
+
/**
* i40e_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
@@ -1208,7 +1214,7 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
- new_buff = &rx_ring->rx_bi[nta];
+ new_buff = i40e_rx_bi(rx_ring, nta);
/* update, and store next to alloc */
nta++;
@@ -1227,29 +1233,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_rx_is_programming_status - check for programming status descriptor
- * @qw: qword representing status_error_len in CPU ordering
- *
- * The value of in the descriptor length field indicate if this
- * is a programming status descriptor for flow director or FCoE
- * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
- * it is a packet descriptor.
- **/
-static inline bool i40e_rx_is_programming_status(u64 qw)
-{
- /* The Rx filter programming status and SPH bit occupy the same
- * spot in the descriptor. Since we don't support packet split we
- * can just reuse the bit as an indication that this is a
- * programming status descriptor.
- */
- return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
-}
-
-/**
- * i40e_clean_programming_status - try clean the programming status descriptor
+ * i40e_clean_programming_status - clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
- * @rx_desc: the rx descriptor written back by HW
- * @qw: qword representing status_error_len in CPU ordering
+ * @qword0_raw: raw (unconverted) qword0 of the Rx descriptor
+ * @qword1: qword1 representing status_error_len in CPU ordering
*
* Flow director should handle FD_FILTER_STATUS to check its filter programming
* status being successful or not and take actions accordingly. FCoE should
@@ -1257,34 +1244,16 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
*
* Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
**/
-struct i40e_rx_buffer *i40e_clean_programming_status(
- struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- u64 qw)
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1)
{
- struct i40e_rx_buffer *rx_buffer;
- u32 ntc;
u8 id;
- if (!i40e_rx_is_programming_status(qw))
- return NULL;
-
- ntc = rx_ring->next_to_clean;
-
- /* fetch, update, and store next to clean */
- rx_buffer = &rx_ring->rx_bi[ntc++];
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-
- id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
- i40e_fd_handle_status(rx_ring, rx_desc, id);
-
- return rx_buffer;
+ i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}
/**
@@ -1336,13 +1305,25 @@ err:
return -ENOMEM;
}
+int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
+{
+ unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
+
+ rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
+ return rx_ring->rx_bi ? 0 : -ENOMEM;
+}
+
+static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
+{
+ memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
+}
+
/**
* i40e_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- unsigned long bi_size;
u16 i;
/* ring already cleared, nothing to do */
@@ -1361,7 +1342,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
if (!rx_bi->page)
continue;
@@ -1388,8 +1369,10 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
+ if (rx_ring->xsk_umem)
+ i40e_clear_rx_bi_zc(rx_ring);
+ else
+ i40e_clear_rx_bi(rx_ring);
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -1430,15 +1413,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err = -ENOMEM;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ int err;
u64_stats_init(&rx_ring->syncp);
@@ -1451,7 +1426,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
if (!rx_ring->desc) {
dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_alloc = 0;
@@ -1463,16 +1438,12 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->queue_index);
if (err < 0)
- goto err;
+ return err;
}
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
return 0;
-err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
- return err;
}
/**
@@ -1507,6 +1478,22 @@ static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
+static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = i40e_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
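A worked instance of the truesize arithmetic above, assuming 4 KiB pages and, for the large-page case, a 1500-byte frame with build_skb headroom in use:

/* PAGE_SIZE < 8192: truesize == i40e_rx_pg_size() / 2 == 2048 for
 * every frame, because each Rx page is flip-flopped between two
 * half-page buffers.
 *
 * PAGE_SIZE >= 8192 with i40e_rx_offset() != 0:
 *   truesize == SKB_DATA_ALIGN(1500 + I40E_SKB_PAD)
 *               + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * i.e. the accounting grows with the received length. xdp.frame_sz
 * (set in the hunks below) carries the same value so XDP can compute
 * its available tailroom.
 */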
+
/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
@@ -1576,7 +1563,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
return false;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
if (!i40e_alloc_mapped_page(rx_ring, bi))
@@ -1598,7 +1585,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -1965,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
{
struct i40e_rx_buffer *rx_buffer;
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -2180,7 +2167,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return I40E_XDP_CONSUMED;
@@ -2246,13 +2233,11 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2335,6 +2320,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false;
struct xdp_buff xdp;
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+#endif
xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
@@ -2365,9 +2353,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
- qword);
- if (unlikely(rx_buffer)) {
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ i40e_inc_ntc(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
continue;
@@ -2389,7 +2380,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+#endif
skb = i40e_run_xdp(rx_ring, &xdp);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 36d37f31a287..5c255977fd58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -296,17 +296,9 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
- union {
- struct {
- struct page *page;
- __u32 page_offset;
- __u16 pagecnt_bias;
- };
- struct {
- void *addr;
- u64 handle;
- };
- };
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
@@ -358,6 +350,7 @@ struct i40e_ring {
union {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
+ struct xdp_buff **rx_bi_zc;
};
DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
@@ -419,7 +412,6 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca; /* ZC allocator anchor */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -495,6 +487,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 8af0e99c6c0d..667c4dc4b39f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -4,13 +4,9 @@
#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_
-void i40e_fd_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id);
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
-struct i40e_rx_buffer *i40e_clean_programming_status(
- struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- u64 qw);
+void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
+ u64 qword1);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
@@ -84,6 +80,38 @@ static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
}
}
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qword1: qword1 representing status_error_len in CPU ordering
+ *
+ * The value in the descriptor length field indicates whether this
+ * is a programming status descriptor for Flow Director or FCoE,
+ * signalled by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
+ * it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qword1)
+{
+ /* The Rx filter programming status and SPH bit occupy the same
+ * spot in the descriptor. Since we don't support packet split we
+ * can just reuse the bit as an indication that this is a
+ * programming status descriptor.
+ */
+ return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
+}
+
+/**
+ * i40e_inc_ntc: Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 6ea2867ff60f..63e098f7cb63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -689,7 +689,7 @@ union i40e_32byte_rx_desc {
__le64 rsvd2;
} read;
struct {
- struct {
+ struct i40e_32b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -727,6 +727,9 @@ union i40e_32byte_rx_desc {
} hi_dword;
} qword3;
} wb; /* writeback */
+ struct {
+ u64 qword[4];
+ } raw;
};
enum i40e_rx_desc_status_bits {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 0b7d29192b2c..7276580cbe64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,68 +2,30 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-/**
- * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
- * @vsi: Current VSI
- * @umem: UMEM to DMA map
- *
- * Returns 0 on success, <0 on failure
- **/
-static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
+int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
- struct i40e_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i, j;
- dma_addr_t dma;
-
- dev = &pf->pdev->dev;
- for (i = 0; i < umem->npgs; i++) {
- dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma))
- goto out_unmap;
+ unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (j = 0; j < i; j++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -1;
+ rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
+ return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}
-/**
- * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
- * @vsi: Current VSI
- * @umem: UMEM to DMA map
- **/
-static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
- struct i40e_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = &pf->pdev->dev;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+ memset(rx_ring->rx_bi_zc, 0,
+ sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
+}
- umem->pages[i].dma = 0;
- }
+static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->rx_bi_zc[idx];
}
/**
@@ -78,7 +40,6 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -92,13 +53,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = i40e_xsk_umem_dma_map(vsi, umem);
+ err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -151,7 +106,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- i40e_xsk_umem_dma_unmap(vsi, umem);
+ xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -184,17 +139,13 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
*
- * This function enables or disables a UMEM to a certain ring.
- *
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
struct bpf_prog *xdp_prog;
- u64 offset;
u32 act;
rcu_read_lock();
@@ -203,9 +154,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- offset = xdp->data - xdp->data_hard_start;
-
- xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
switch (act) {
case XDP_PASS:
@@ -232,107 +180,26 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
return result;
}
-/**
- * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
- * @rx_ring: Rx ring
- * @bi: Rx buffer to populate
- *
- * This function allocates an Rx buffer. The buffer can come from fill
- * queue, or via the recycle queue (next_to_alloc).
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = bi->addr;
- u64 handle, hr;
-
- if (addr) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-/**
- * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
- * @rx_ring: Rx ring
- * @bi: Rx buffer to populate
- *
- * This function allocates an Rx buffer. The buffer can come from fill
- * queue, or via the reuse queue.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, hr;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- handle &= rx_ring->xsk_umem->chunk_mask;
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-static __always_inline bool
-__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
- bool alloc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi))
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
+ struct xdp_buff **bi, *xdp;
+ dma_addr_t dma;
bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
+ bi = i40e_rx_bi(rx_ring, ntu);
do {
- if (!alloc(rx_ring, bi)) {
+ xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!xdp) {
ok = false;
goto no_buffers;
}
-
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
-
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ *bi = xdp;
+ dma = xsk_buff_xdp_get_dma(xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -340,11 +207,10 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
if (unlikely(ntu == rx_ring->count)) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
+ bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
- rx_desc->wb.qword1.status_error_len = 0;
count--;
} while (count);
@@ -356,127 +222,8 @@ no_buffers:
}
/**
- * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
- * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
- *
- * This function allocates a number of Rx buffers from the reuse queue
- * or fill ring and places them on the Rx ring.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
-{
- return __i40e_alloc_rx_buffers_zc(rx_ring, count,
- i40e_alloc_buffer_slow_zc);
-}
-
-/**
- * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
- * @rx_ring: Rx ring
- * @count: The number of buffers to allocate
- *
- * This function allocates a number of Rx buffers from the fill ring
- * or the internal recycle mechanism and places them on the Rx ring.
- *
- * Returns true for a successful allocation, false otherwise
- **/
-static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
-{
- return __i40e_alloc_rx_buffers_zc(rx_ring, count,
- i40e_alloc_buffer_zc);
-}
-
-/**
- * i40e_get_rx_buffer_zc - Return the current Rx buffer
- * @rx_ring: Rx ring
- * @size: The size of the rx buffer (read from descriptor)
- *
- * This function returns the current, received Rx buffer, and also
- * does DMA synchronization. the Rx ring.
- *
- * Returns the received Rx buffer
- **/
-static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
- const unsigned int size)
-{
- struct i40e_rx_buffer *bi;
-
- bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- bi->dma, 0,
- size,
- DMA_BIDIRECTIONAL);
-
- return bi;
-}
-
-/**
- * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
- * @rx_ring: Rx ring
- * @old_bi: The Rx buffer to recycle
- *
- * This function recycles a finished Rx buffer, and places it on the
- * recycle queue (next_to_alloc).
- **/
-static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_bi)
-{
- struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
- u16 nta = rx_ring->next_to_alloc;
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_bi->dma = old_bi->dma;
- new_bi->addr = old_bi->addr;
- new_bi->handle = old_bi->handle;
-
- old_bi->addr = NULL;
-}
-
-/**
- * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
- * @alloc: Zero-copy allocator
- * @handle: Buffer handle
- **/
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
-{
- struct i40e_rx_buffer *bi;
- struct i40e_ring *rx_ring;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(alloc, struct i40e_ring, zca);
- hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- mask = rx_ring->xsk_umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- bi = &rx_ring->rx_bi[nta];
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
- rx_ring->xsk_umem->headroom);
-}
-
-/**
- * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer
+ * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
* @rx_ring: Rx ring
- * @bi: Rx buffer
* @xdp: xdp_buff
*
* This function allocates a new skb from a zero-copy Rx buffer.
@@ -484,7 +231,6 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
* Returns the skb, or NULL on failure.
**/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi,
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
@@ -503,24 +249,11 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(xdp);
return skb;
}
/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
-/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -534,20 +267,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
+ struct xdp_buff **bi;
unsigned int size;
u64 qword;
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- !i40e_alloc_rx_buffers_fast_zc(rx_ring,
- cleaned_count);
+ !i40e_alloc_rx_buffers_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -560,35 +290,36 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- bi = i40e_clean_programming_status(rx_ring, rx_desc,
- qword);
- if (unlikely(bi)) {
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring,
+ rx_desc->raw.qword[0],
+ qword);
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ xsk_buff_free(*bi);
+ *bi = NULL;
cleaned_count++;
+ i40e_inc_ntc(rx_ring);
continue;
}
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
break;
- bi = i40e_get_rx_buffer_zc(rx_ring, size);
- xdp.data = bi->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = bi->handle;
+ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ (*bi)->data_end = (*bi)->data + size;
+ xsk_buff_dma_sync_for_cpu(*bi);
- xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
+ xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
xdp_xmit |= xdp_res;
- bi->addr = NULL;
- } else {
- i40e_reuse_rx_buffer_zc(rx_ring, bi);
- }
+ else
+ xsk_buff_free(*bi);
+ *bi = NULL;
total_rx_bytes += size;
total_rx_packets++;
@@ -604,7 +335,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
* BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
* SBP is *not* set in PRT_SBPVSI (default not set).
*/
- skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
+ skb = i40e_construct_skb_zc(rx_ring, *bi);
+ *bi = NULL;
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
break;
@@ -662,10 +394,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -824,13 +555,13 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
- if (!rx_bi->addr)
+ if (!rx_bi)
continue;
- xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
- rx_bi->addr = NULL;
+ xsk_buff_free(rx_bi);
+ rx_bi = NULL;
}
}
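Taken together, these hunks replace the driver-private UMEM allocator, recycle queue, and zero-copy free callback with the core xsk_buff pool. A condensed sketch of the resulting Rx buffer lifecycle, with error paths and ring bookkeeping elided (the helper wrapping it is ours):

static void xsk_buff_lifecycle_sketch(struct i40e_ring *rx_ring,
				      union i40e_rx_desc *rx_desc,
				      unsigned int size)
{
	/* 1. Allocation: one xdp_buff per descriptor slot */
	struct xdp_buff *xdp = xsk_buff_alloc(rx_ring->xsk_umem);

	if (!xdp)	/* fill queue empty */
		return;
	rx_desc->read.pkt_addr = cpu_to_le64(xsk_buff_xdp_get_dma(xdp));
	rx_desc->read.hdr_addr = 0;

	/* 2. Completion: set the frame length, sync for CPU use */
	xdp->data_end = xdp->data + size;
	xsk_buff_dma_sync_for_cpu(xdp);

	/* 3. Release: frames not kept by XDP go back to the pool */
	xsk_buff_free(xdp);
}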
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 9ed59c14eb55..ea919a7d60ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -12,12 +12,13 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid);
-void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
struct i40e_ring *tx_ring, int napi_budget);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 29c6c6743450..980bbcc64b4b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,10 +17,14 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
+ ice_fltr.o \
+ ice_fdir.o \
+ ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
+ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5c11448bfbb3..5792ee616b5c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,9 +34,14 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
+#include <linux/cpu_rmap.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
+#include <net/geneve.h>
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/vxlan.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -46,7 +51,9 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_fdir.h"
#include "ice_xsk.h"
+#include "ice_arfs.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -62,6 +69,7 @@ extern const char ice_drv_ver[];
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MIN_MSIX 2
+#define ICE_FDIR_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -90,6 +98,7 @@ extern const char ice_drv_ver[];
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
+#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
@@ -210,6 +219,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_FD_FLUSH_REQ,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
@@ -244,8 +254,8 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
- int num_q_vectors;
- int base_vector; /* IRQ base for OS reserved vectors */
+ u16 num_q_vectors;
+ u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -253,6 +263,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
u16 ethtype; /* Ethernet protocol for pause frame */
+ u16 num_gfltr;
+ u16 num_bfltr;
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
@@ -261,6 +273,14 @@ struct ice_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
+ /* aRFS members only allocated for the PF VSI */
+#define ICE_MAX_ARFS_LIST 1024
+#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
+ struct hlist_head *arfs_fltr_list;
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
+ atomic_t *arfs_last_fltr_id;
+
u16 max_frame;
u16 rx_buf_len;
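ICE_ARFS_LST_MASK above relies on ICE_MAX_ARFS_LIST being a power of two, so masking a flow hash is equivalent to a modulo when picking a bucket. A hedged sketch of bucket selection (helper name is illustrative, not from the driver):

static struct hlist_head *
ice_arfs_bucket(struct ice_vsi *vsi, u32 flow_hash)
{
	/* 1024 buckets: hash & (1024 - 1) yields an index 0..1023 */
	u16 idx = flow_hash & ICE_ARFS_LST_MASK;

	return &vsi->arfs_fltr_list[idx];
}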
@@ -335,12 +355,14 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
+ ICE_FLAG_FD_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -362,11 +384,13 @@ struct ice_pf {
*/
u16 sriov_base_vector;
+ u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
- int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_qps_per_vf;
u16 num_msix_per_vf;
@@ -385,11 +409,11 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause MSIX vector index */
- u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 oicr_idx; /* Other interrupt cause MSIX vector index */
+ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u32 num_lan_msix; /* Total MSIX vectors for base driver */
+ u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -500,8 +524,27 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
return NULL;
}
+/**
+ * ice_get_ctrl_vsi - Get the control VSI
+ * @pf: PF instance
+ */
+static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
+{
+ /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
+ if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return pf->vsi[pf->ctrl_vsi_idx];
+}
+
+#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
+#define ICE_FD_STAT_PF_IDX(base_idx) \
+ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
+#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
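A worked instance of the counter-block arithmetic, assuming base_idx is the PF's base counter index:

/* ICE_FD_STAT_CTR_BLOCK_COUNT == 256, so PF block 0 starts at
 * counter 0, block 1 at 256, block 2 at 512, and so on; the
 * sideband (SB) statistic sits at the start of its block, hence
 * ICE_FD_SB_STAT_IDX(base_idx) == base_idx * 256.
 */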
+
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_open_ctrl(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
@@ -523,7 +566,24 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+const char *ice_stat_str(enum ice_status stat_err);
+const char *ice_aq_str(enum ice_aq_err aq_err);
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun);
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+void ice_fdir_release_flows(struct ice_hw *hw);
+void ice_fdir_replay_flows(struct ice_hw *hw);
+void ice_fdir_replay_fltrs(struct ice_pf *pf);
+int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
+void ice_service_task_schedule(struct ice_pf *pf);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 2381b4014ed6..92f82f2a8af4 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -107,6 +107,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_RXQS 0x0041
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_FD 0x0045
#define ICE_AQC_CAPS_MAX_MTU 0x0047
u8 major_ver;
@@ -155,13 +156,11 @@ struct ice_aqc_manage_mac_write {
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
#define ICE_AQC_MAN_MAC_WR_S 6
-#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_WR_M ICE_M(3, ICE_AQC_MAN_MAC_WR_S)
#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
-#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S)
- /* High 16 bits of MAC address in big endian order */
- __be16 sah;
- /* Low 32 bits of MAC address in big endian order */
- __be32 sal;
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S)
+ /* byte stream in network order */
+ u8 mac_addr[ETH_ALEN];
__le32 addr_high;
__le32 addr_low;
};
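With the big-endian sah/sal pair replaced by a plain byte stream, populating the command body reduces to a MAC copy. A minimal sketch (the wrapper name is ours):

static void ice_fill_mac_write_cmd(struct ice_aqc_manage_mac_write *cmd,
				   const u8 *mac_addr)
{
	/* Address goes in as-is, in network byte order */
	ether_addr_copy(cmd->mac_addr, mac_addr);
}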
@@ -232,6 +231,11 @@ struct ice_aqc_get_sw_cfg_resp {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
@@ -240,6 +244,9 @@ struct ice_aqc_get_sw_cfg_resp {
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
+#define ICE_AQC_RES_TYPE_S 0
+#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S)
+
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
*/
@@ -541,7 +548,7 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
- (0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
/* Bit 17:18 - Defines other actions */
/* Other action = 0 - Mirror VSI */
@@ -967,7 +974,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1059,6 +1066,25 @@ struct ice_aqc_set_phy_cfg_data {
u8 rsvd1;
};
+/* Set MAC Config command data structure (direct 0x0603) */
+struct ice_aqc_set_mac_cfg {
+ __le16 max_frame_size;
+ u8 params;
+#define ICE_AQ_SET_MAC_PACE_S 3
+#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S)
+#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7)
+#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0
+#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M
+ u8 tx_tmr_priority;
+ __le16 tx_tmr_value;
+ __le16 fc_refresh_threshold;
+ u8 drop_opts;
+#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0)
+#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0
+#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0)
+ u8 reserved[7];
+};
+
/* Restart AN command data structure (direct 0x0605)
* Also used for response, with only the lport_num field present.
*/
@@ -1264,6 +1290,33 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
+/* The result of netlist NVM read comes in a TLV format. The actual data
+ * (netlist header) starts from word offset 1 (byte 2). The FW strips
+ * out the type field from the TLV header so all the netlist fields
+ * should adjust their offset value by 1 word (2 bytes) in order to map
+ * their correct location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -1648,10 +1701,12 @@ struct ice_pkg_ver {
};
#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_NAME_SIZE 28
struct ice_aqc_get_pkg_info {
struct ice_pkg_ver ver;
- char name[ICE_PKG_NAME_SIZE];
+ char name[ICE_SEG_NAME_SIZE];
+ __le32 track_id;
u8 is_in_nvm;
u8 is_active;
u8 is_active_at_boot;
@@ -1738,6 +1793,7 @@ struct ice_aq_desc {
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_set_mac_cfg set_mac_cfg;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
@@ -1770,6 +1826,7 @@ enum ice_aq_err {
ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
@@ -1834,6 +1891,7 @@ enum ice_adminq_opc {
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_set_mac_cfg = 0x0603,
ice_aqc_opc_restart_an = 0x0605,
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
new file mode 100644
index 000000000000..6560acd76c94
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+
+/**
+ * ice_is_arfs_active - helper to check if aRFS is active
+ * @vsi: VSI to check
+ */
+static bool ice_is_arfs_active(struct ice_vsi *vsi)
+{
+ return !!vsi->arfs_fltr_list;
+}
+
+/**
+ * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
+ * @hw: pointer to the HW structure
+ * @flow_type: flow type as Flow Director understands it
+ *
+ * Flow Director will query this function to see if aRFS is currently using
+ * the specified flow_type for perfect (4-tuple) filters.
+ */
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+ struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
+ struct ice_pf *pf = hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ /* active counters can be updated by multiple CPUs */
+ smp_mb__before_atomic();
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
+ * @vsi: VSI that aRFS is active on
+ * @entry: aRFS entry used to change counters
+ * @add: true to increment counter, false to decrement
+ */
+static void
+ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
+ struct ice_arfs_entry *entry, bool add)
+{
+ struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
+
+ switch (entry->fltr_info.flow_type) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv4_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv4_cnt);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ if (add)
+ atomic_inc(&fltr_cntrs->active_udpv6_cnt);
+ else
+ atomic_dec(&fltr_cntrs->active_udpv6_cnt);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
+ entry->fltr_info.flow_type);
+ }
+}
+
+/**
+ * ice_arfs_del_flow_rules - delete the rules passed in from HW
+ * @vsi: VSI for the flow rules that need to be deleted
+ * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
+ *
+ * Loop through the delete list passed in and remove the rules from HW. After
+ * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
+ * longer being referenced by the aRFS hash table.
+ */
+static void
+ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, e, false);
+ else
+ dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, e->fltr_state, e->fltr_info.fltr_id,
+ e->flow_id, e->fltr_info.q_index);
+
+ /* The aRFS hash table is no longer referencing this entry */
+ hlist_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_arfs_add_flow_rules - add the rules passed in to HW
+ * @vsi: VSI for the flow rules that need to be added
+ * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
+ *
+ * Loop through the add list passed in and add the rules to HW. After each
+ * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
+ * the ice_arfs_entry(s) because they are still being referenced in the aRFS
+ * hash table.
+ */
+static void
+ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
+{
+ struct ice_arfs_entry_ptr *ep;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
+ int result;
+
+ result = ice_fdir_write_fltr(vsi->back,
+ &ep->arfs_entry->fltr_info, true,
+ false);
+ if (!result)
+ ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
+ true);
+ else
+ dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
+ result, ep->arfs_entry->fltr_state,
+ ep->arfs_entry->fltr_info.fltr_id,
+ ep->arfs_entry->flow_id,
+ ep->arfs_entry->fltr_info.q_index);
+
+ hlist_del(&ep->list_entry);
+ devm_kfree(dev, ep);
+ }
+}
+
+/**
+ * ice_arfs_is_flow_expired - check if the aRFS entry has expired
+ * @vsi: VSI containing the aRFS entry
+ * @arfs_entry: aRFS entry that's being checked for expiration
+ *
+ * Return true if the flow has expired, else false. This function should be used
+ * to determine whether or not an aRFS entry should be removed from the hardware
+ * and software structures.
+ */
+static bool
+ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
+{
+#define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000)
+ if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
+ arfs_entry->flow_id,
+ arfs_entry->fltr_info.fltr_id))
+ return true;
+
+ /* expiration timer only used for UDP filters */
+ if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
+ arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ return false;
+
+ return time_in_range64(arfs_entry->time_activated +
+ ICE_ARFS_TIME_DELTA_EXPIRATION,
+ arfs_entry->time_activated, get_jiffies_64());
+}
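How the final test reads: time_in_range64(a, b, c) is true when b <= a <= c, so with a = time_activated + 5 s, b = time_activated, and c = the current jiffies, the call returns true once at least five seconds have elapsed since activation, and the lower bound keeps the comparison safe across jiffies wraparound:

/* time_in_range64(time_activated + 5s, time_activated, now)
 *   == time_activated <= time_activated + 5s <= now
 *   == now >= time_activated + 5s   (flow has aged out)
 */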
+
+/**
+ * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
+ * @vsi: the VSI to be forwarded to
+ * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
+ * @add_list: list to populate with filters to be added to Flow Director
+ * @del_list: list to populate with filters to be deleted from Flow Director
+ *
+ * Iterate over the hlist at the index given in the aRFS hash table and
+ * determine if there are any aRFS entries that need to be either added or
+ * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
+ * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
+ * the flow has expired delete the filter from HW. The caller of this function
+ * is expected to add/delete rules on the add_list/del_list respectively.
+ */
+static void
+ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
+ struct hlist_head *add_list,
+ struct hlist_head *del_list)
+{
+ struct ice_arfs_entry *e;
+ struct hlist_node *n;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(vsi->back);
+
+ /* go through the aRFS hlist at this idx and check for needed updates */
+ hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
+ /* check if filter needs to be added to HW */
+ if (e->fltr_state == ICE_ARFS_INACTIVE) {
+ enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
+ struct ice_arfs_entry_ptr *ep =
+ devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);
+
+ if (!ep)
+ continue;
+ INIT_HLIST_NODE(&ep->list_entry);
+ /* reference aRFS entry to add HW filter */
+ ep->arfs_entry = e;
+ hlist_add_head(&ep->list_entry, add_list);
+ e->fltr_state = ICE_ARFS_ACTIVE;
+ /* expiration timer only used for UDP flows */
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ e->time_activated = get_jiffies_64();
+ } else if (e->fltr_state == ICE_ARFS_ACTIVE) {
+ /* check if filter needs to be removed from HW */
+ if (ice_arfs_is_flow_expired(vsi, e)) {
+				/* remove the aRFS entry from the hash table
+				 * so it can be deleted and isn't referenced
+				 * again the next time through this hlist index
+				 */
+ hlist_del(&e->list_entry);
+ e->fltr_state = ICE_ARFS_TODEL;
+ /* save reference to aRFS entry for delete */
+ hlist_add_head(&e->list_entry, del_list);
+ }
+ }
+}
+
+/**
+ * ice_sync_arfs_fltrs - update all aRFS filters
+ * @pf: board private structure
+ */
+void ice_sync_arfs_fltrs(struct ice_pf *pf)
+{
+ HLIST_HEAD(tmp_del_list);
+ HLIST_HEAD(tmp_add_list);
+ struct ice_vsi *pf_vsi;
+ unsigned int i;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ if (!ice_is_arfs_active(pf_vsi))
+ return;
+
+ spin_lock_bh(&pf_vsi->arfs_lock);
+	/* once aRFS is processed for the PF VSI we are done */
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
+ &tmp_del_list);
+ spin_unlock_bh(&pf_vsi->arfs_lock);
+
+ /* use list of ice_arfs_entry(s) for delete */
+ ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);
+
+ /* use list of ice_arfs_entry_ptr(s) for add */
+ ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
+}
+
+/**
+ * ice_arfs_build_entry - builds an aRFS entry based on input
+ * @vsi: destination VSI for this flow
+ * @fk: flow dissector keys for creating the tuple
+ * @rxq_idx: Rx queue to steer this flow to
+ * @flow_id: passed down from the stack and saved for flow expiration
+ *
+ * Returns a newly allocated aRFS entry on success or NULL on failure.
+ */
+static struct ice_arfs_entry *
+ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_fdir_fltr *fltr_info;
+ u8 ip_proto;
+
+ arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
+ sizeof(*arfs_entry),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!arfs_entry)
+ return NULL;
+
+ fltr_info = &arfs_entry->fltr_info;
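+	/* steer matched packets directly to the chosen Rx queue index */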
+ fltr_info->q_index = rxq_idx;
+ fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ fltr_info->dest_vsi = vsi->idx;
+ ip_proto = fk->basic.ip_proto;
+
+ if (fk->basic.n_proto == htons(ETH_P_IP)) {
+ fltr_info->ip.v4.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
+ fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
+ fltr_info->ip.v4.src_port = fk->ports.src;
+ fltr_info->ip.v4.dst_port = fk->ports.dst;
+ } else { /* ETH_P_IPV6 */
+ fltr_info->ip.v6.proto = ip_proto;
+ fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP :
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ fltr_info->ip.v6.src_port = fk->ports.src;
+ fltr_info->ip.v6.dst_port = fk->ports.dst;
+ }
+
+ arfs_entry->flow_id = flow_id;
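+	/* wrap below RPS_NO_FILTER so the ID handed back to the stack is
+	 * never mistaken for the "no filter" sentinel
+	 */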
+ fltr_info->fltr_id =
+ atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
+
+ return arfs_entry;
+}
+
+/**
+ * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
+ * @hw: pointer to HW structure
+ * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
+ * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
+ *
+ * We only support perfect (4-tuple) filters for aRFS. This function lets aRFS
+ * check whether perfect (4-tuple) flow rules are currently programmed by Flow
+ * Director.
+ */
+static bool
+ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
+{
+ unsigned long *perfect_fltr = hw->fdir_perfect_fltr;
+
+ /* advanced Flow Director disabled, perfect filters always supported */
+ if (!perfect_fltr)
+ return true;
+
+ if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
+ else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
+ return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);
+
+ return false;
+}
+
+/**
+ * ice_rx_flow_steer - steer the Rx flow to where the application is running
+ * @netdev: ptr to the netdev being adjusted
+ * @skb: buffer with required header information
+ * @rxq_idx: queue to which the flow needs to move
+ * @flow_id: flow identifier provided by the netdev
+ *
+ * Based on the skb, rxq_idx, and flow_id passed in, add/update an entry in the
+ * aRFS hash table. Iterate over one of the hlists in the aRFS hash table: if
+ * the flow_id already exists in the hash table but the rxq_idx has changed,
+ * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW; else, if
+ * the entry is marked as ICE_ARFS_TODEL, delete it from the aRFS hash table.
+ * If neither of the previous conditions is true, add a new entry to the aRFS
+ * hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
+ * added to HW.
+ */
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_arfs_entry *arfs_entry;
+ struct ice_vsi *vsi = np->vsi;
+ struct flow_keys fk;
+ struct ice_pf *pf;
+ __be16 n_proto;
+ u8 ip_proto;
+ u16 idx;
+ int ret;
+
+ /* failed to allocate memory for aRFS so don't crash */
+ if (unlikely(!vsi->arfs_fltr_list))
+ return -ENODEV;
+
+ pf = vsi->back;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
+
+ n_proto = fk.basic.n_proto;
+ /* Support only IPV4 and IPV6 */
+ if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
+ n_proto == htons(ETH_P_IPV6))
+ ip_proto = fk.basic.ip_proto;
+ else
+ return -EPROTONOSUPPORT;
+
+ /* Support only TCP and UDP */
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ /* only support 4-tuple filters for aRFS */
+ if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
+ return -EOPNOTSUPP;
+
+ /* choose the aRFS list bucket based on skb hash */
+ idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
+ /* search for entry in the bucket */
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
+ list_entry) {
+ struct ice_fdir_fltr *fltr_info;
+
+ /* keep searching for the already existing arfs_entry flow */
+ if (arfs_entry->flow_id != flow_id)
+ continue;
+
+ fltr_info = &arfs_entry->fltr_info;
+ ret = fltr_info->fltr_id;
+
+ if (fltr_info->q_index == rxq_idx ||
+ arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
+ goto out;
+
+ /* update the queue to forward to on an already existing flow */
+ fltr_info->q_index = rxq_idx;
+ arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
+ ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
+ goto out_schedule_service_task;
+ }
+
+ arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
+ if (!arfs_entry) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = arfs_entry->fltr_info.fltr_id;
+ INIT_HLIST_NODE(&arfs_entry->list_entry);
+ hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
+out_schedule_service_task:
+ ice_service_task_schedule(pf);
+out:
+ spin_unlock_bh(&vsi->arfs_lock);
+ return ret;
+}
+
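+/* A minimal wiring sketch (illustrative only; the actual assignment lives in
+ * the driver's netdev ops table, guarded by CONFIG_RFS_ACCEL):
+ *
+ *	.ndo_rx_flow_steer = ice_rx_flow_steer,
+ */
+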
+/**
+ * ice_init_arfs_cntrs - initialize aRFS counter values
+ * @vsi: VSI on which the aRFS counters need to be initialized
+ */
+static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
+{
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
+ GFP_KERNEL);
+ if (!vsi->arfs_fltr_cntrs)
+ return -ENOMEM;
+
+ vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
+ GFP_KERNEL);
+ if (!vsi->arfs_last_fltr_id) {
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_init_arfs - initialize aRFS resources
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_init_arfs(struct ice_vsi *vsi)
+{
+ struct hlist_head *arfs_fltr_list;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return;
+
+ arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ GFP_KERNEL);
+ if (!arfs_fltr_list)
+ return;
+
+ if (ice_init_arfs_cntrs(vsi))
+ goto free_arfs_fltr_list;
+
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
+ INIT_HLIST_HEAD(&arfs_fltr_list[i]);
+
+ spin_lock_init(&vsi->arfs_lock);
+
+ vsi->arfs_fltr_list = arfs_fltr_list;
+
+ return;
+
+free_arfs_fltr_list:
+ kfree(arfs_fltr_list);
+}
+
+/**
+ * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_clear_arfs(struct ice_vsi *vsi)
+{
+ struct device *dev;
+ unsigned int i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
+ !vsi->arfs_fltr_list)
+ return;
+
+ dev = ice_pf_to_dev(vsi->back);
+ for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
+ struct ice_arfs_entry *r;
+ struct hlist_node *n;
+
+ spin_lock_bh(&vsi->arfs_lock);
+ hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
+ list_entry) {
+ hlist_del(&r->list_entry);
+ devm_kfree(dev, r);
+ }
+ spin_unlock_bh(&vsi->arfs_lock);
+ }
+
+ kfree(vsi->arfs_fltr_list);
+ vsi->arfs_fltr_list = NULL;
+ kfree(vsi->arfs_last_fltr_id);
+ vsi->arfs_last_fltr_id = NULL;
+ kfree(vsi->arfs_fltr_cntrs);
+ vsi->arfs_fltr_cntrs = NULL;
+}
+
+/**
+ * ice_free_cpu_rx_rmap - free the CPU reverse map set up for the VSI
+ * @vsi: the VSI to be forwarded to
+ */
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+
+ if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev || !netdev->rx_cpu_rmap ||
+ netdev->reg_state != NETREG_REGISTERED)
+ return;
+
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+}
+
+/**
+ * ice_set_cpu_rx_rmap - set up the CPU reverse map for each queue
+ * @vsi: the VSI to be forwarded to
+ */
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
+{
+ struct net_device *netdev;
+ struct ice_pf *pf;
+ int base_idx, i;
+
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ netdev = vsi->netdev;
+ if (!pf || !netdev || !vsi->num_q_vectors ||
+ vsi->netdev->reg_state != NETREG_REGISTERED)
+ return -EINVAL;
+
+ netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
+ vsi->type, netdev->name, vsi->num_q_vectors);
+
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
+ if (unlikely(!netdev->rx_cpu_rmap))
+ return -EINVAL;
+
+ base_idx = vsi->base_vector;
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ pf->msix_entries[base_idx + i].vector)) {
+ ice_free_cpu_rx_rmap(vsi);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_remove_arfs - remove/clear all aRFS resources
+ * @pf: device private structure
+ */
+void ice_remove_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_free_cpu_rx_rmap(pf_vsi);
+ ice_clear_arfs(pf_vsi);
+}
+
+/**
+ * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
+ * @pf: device private structure
+ */
+void ice_rebuild_arfs(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi;
+
+ pf_vsi = ice_get_main_vsi(pf);
+ if (!pf_vsi)
+ return;
+
+ ice_remove_arfs(pf);
+ if (ice_set_cpu_rx_rmap(pf_vsi)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
+ return;
+ }
+ ice_init_arfs(pf_vsi);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
new file mode 100644
index 000000000000..f39cd16403ed
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_ARFS_H_
+#define _ICE_ARFS_H_
+enum ice_arfs_fltr_state {
+ ICE_ARFS_INACTIVE,
+ ICE_ARFS_ACTIVE,
+ ICE_ARFS_TODEL,
+};
+
+struct ice_arfs_entry {
+ struct ice_fdir_fltr fltr_info;
+ struct hlist_node list_entry;
+ u64 time_activated; /* only valid for UDP flows */
+ u32 flow_id;
+ /* fltr_state = 0 - ICE_ARFS_INACTIVE:
+ * filter needs to be updated or programmed in HW.
+ * fltr_state = 1 - ICE_ARFS_ACTIVE:
+ * filter is active and programmed in HW.
+ * fltr_state = 2 - ICE_ARFS_TODEL:
+ * filter has been deleted from HW and needs to be removed from
+ * the aRFS hash table.
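+	 *
+	 * Typical transitions (sketch): INACTIVE -> ACTIVE when the filter is
+	 * queued for HW add, ACTIVE -> TODEL when the flow expires, and TODEL
+	 * entries are freed once the HW rule has been deleted.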
+ */
+ u8 fltr_state;
+};
+
+struct ice_arfs_entry_ptr {
+ struct ice_arfs_entry *arfs_entry;
+ struct hlist_node list_entry;
+};
+
+struct ice_arfs_active_fltr_cntrs {
+ atomic_t active_tcpv4_cnt;
+ atomic_t active_tcpv6_cnt;
+ atomic_t active_udpv4_cnt;
+ atomic_t active_udpv6_cnt;
+};
+
+#ifdef CONFIG_RFS_ACCEL
+int
+ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
+ u16 rxq_idx, u32 flow_id);
+void ice_clear_arfs(struct ice_vsi *vsi);
+void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_init_arfs(struct ice_vsi *vsi);
+void ice_sync_arfs_fltrs(struct ice_pf *pf);
+int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
+void ice_remove_arfs(struct ice_pf *pf);
+void ice_rebuild_arfs(struct ice_pf *pf);
+bool
+ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
+ enum ice_fltr_ptype flow_type);
+#else
+#define ice_sync_arfs_fltrs(pf) do {} while (0)
+#define ice_init_arfs(vsi) do {} while (0)
+#define ice_clear_arfs(vsi) do {} while (0)
+#define ice_remove_arfs(pf) do {} while (0)
+#define ice_free_cpu_rx_rmap(vsi) do {} while (0)
+#define ice_rebuild_arfs(pf) do {} while (0)
+
+static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+static inline int
+ice_rx_flow_steer(struct net_device __always_unused *netdev,
+ const struct sk_buff __always_unused *skb,
+ u16 __always_unused rxq_idx, u32 __always_unused flow_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool
+ice_is_arfs_using_perfect_flow(struct ice_hw __always_unused *hw,
+ enum ice_fltr_ptype __always_unused flow_type)
+{
+ return false;
+}
+#endif /* CONFIG_RFS_ACCEL */
+#endif /* _ICE_ARFS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a19cd6f5436b..d620d26d42ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <net/xdp_sock_drv.h>
#include "ice_base.h"
+#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
@@ -12,7 +14,7 @@
*/
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
- int offset, i;
+ unsigned int offset, i;
mutex_lock(qs_cfg->qs_mutex);
offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
@@ -24,7 +26,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
mutex_unlock(qs_cfg->qs_mutex);
return 0;
@@ -38,7 +40,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
*/
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
- int i, index = 0;
+ unsigned int i, index = 0;
mutex_lock(qs_cfg->qs_mutex);
for (i = 0; i < qs_cfg->q_count; i++) {
@@ -47,7 +49,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
if (index >= qs_cfg->pf_map_size)
goto err_scatter;
set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
}
mutex_unlock(qs_cfg->qs_mutex);
@@ -96,7 +98,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* We allocate one q_vector and set default value for ITR setting associated
* with this q_vector. If allocation fails we return -ENOMEM.
*/
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
@@ -246,6 +248,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -279,12 +282,13 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
int ice_setup_rx_ctx(struct ice_ring *ring)
{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
struct ice_hw *hw;
- u32 regval;
u16 pf_q;
int err;
@@ -308,24 +312,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->xsk_umem) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ ring->rx_buf_len =
+ xsk_umem_get_rx_frame_size(ring->xsk_umem);
+		/* For AF_XDP ZC, we disallow packets from spanning
+		 * multiple buffers, thus letting us skip that
+		 * handling in the fast path.
+		 */
chain_len = 1;
- ring->zca.free = ice_zca_free;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca);
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
if (err)
return err;
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
- dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -376,38 +379,27 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
chain_len * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- if (vsi->type != ICE_VSI_VF) {
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- } else {
- regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
- QRXFLXP_CNTXT_RXDID_PRIO_M |
- QRXFLXP_CNTXT_TS_M);
- }
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+	/* Enable Flexible Descriptors in the queue context, which
+	 * allows this driver to select a specific receive descriptor format.
+	 * Increase the context priority to pick up the profile ID; the default
+	 * is 0x01, and setting it to 0x03 ensures the profile is programmed
+	 * even if the previous context had the same priority.
+	 */
+ if (vsi->type != ICE_VSI_VF)
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ else
+ ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -425,13 +417,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- err = ring->xsk_umem ?
- ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
- if (err)
- dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
- ring->q_index, pf_q);
+ if (ring->xsk_umem) {
+ if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+ dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ num_bufs, ring->q_index);
+ dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
+ return 0;
+ }
+
+ err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ if (err)
+ dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ ring->q_index, pf_q);
+ return 0;
+ }
+
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}
@@ -453,7 +455,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
if (ret) {
/* contig failed, so try with scatter approach */
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
qs_cfg->scatter_count);
ret = __ice_vsi_get_qs_sc(qs_cfg);
}
@@ -526,7 +528,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int v_idx, err;
+ u16 v_idx;
+ int err;
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
@@ -562,7 +565,7 @@ err_out:
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
+ u16 tx_rings_rem, rx_rings_rem;
int v_id;
/* initially assigning remaining rings count to VSIs num queue value */
@@ -571,10 +574,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (v_id = 0; v_id < q_vectors; v_id++) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+ u8 tx_rings_per_v, rx_rings_per_v;
+ u16 q_id, q_base;
/* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
@@ -590,7 +595,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_rem -= tx_rings_per_v;
/* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
+ q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
@@ -633,6 +639,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
u8 buf_len = sizeof(*qg_buf);
+ struct ice_hw *hw = &pf->hw;
enum ice_status status;
u16 pf_q;
u8 tc;
@@ -641,13 +648,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
*/
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
if (IS_ENABLED(CONFIG_DCB))
tc = ring->dcb_tc;
@@ -662,8 +669,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
@@ -832,8 +839,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
- status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
+ ice_stat_str(status));
return -ENODEV;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2c0d8fd3d5cd..bce0e1281168 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -316,12 +316,78 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
+ * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
+ * @hw: pointer to the HW struct
+ * @cmd: pointer to MAC cfg structure
+ *
+ * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
+ * descriptor
+ */
+static void
+ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
+ struct ice_aqc_set_mac_cfg *cmd)
+{
+ u16 fc_thres_val, tx_timer_val;
+ u32 val;
+
+	/* We read back the transmit timer and FC threshold values of
+	 * LFC, so we use index
+	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+	 *
+	 * Also, because we are operating on the transmit timer and FC
+	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority.
+	 */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
+
+ /* Retrieve the FC threshold */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+
+ cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+}
+
+/**
+ * ice_aq_set_mac_cfg - set MAC configuration
+ * @hw: pointer to the HW struct
+ * @max_frame_size: Maximum Frame Size to be supported
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603)
+ */
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_cfg;
+
+ if (max_frame_size == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+
+ ice_fill_tx_timer_and_fc_thresh(hw, cmd);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
@@ -332,7 +398,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- return ice_init_def_sw_recp(hw);
+ status = ice_init_def_sw_recp(hw);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+ return status;
+ }
+ return 0;
}
/**
@@ -653,6 +724,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
+ /* Set bit to enable Flow Director filters */
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ INIT_LIST_HEAD(&hw->fdir_list_head);
+
ice_clear_pxe_mode(hw);
status = ice_init_nvm(hw);
@@ -743,9 +818,18 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
+ /* enable jumbo frame support at MAC level */
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ /* Obtain counter base index which would be used by flow director */
+ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+ mutex_init(&hw->tnl_lock);
return 0;
err_unroll_fltr_mgmt_struct:
@@ -769,12 +853,14 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
ice_cleanup_fltr_mgmt_struct(hw);
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
+ mutex_destroy(&hw->tnl_lock);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -878,7 +964,12 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
- for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+	/* Wait for the PFR to complete. The wait time is the global config
+	 * lock timeout plus the PFR timeout, which accounts for a possible
+	 * reset occurring during a package download operation.
+	 */
+ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@@ -1012,7 +1103,7 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
rlan_ctx->prefena = 1;
- ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -1678,6 +1769,33 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
+ case ICE_AQC_CAPS_FD:
+ if (dev_p) {
+ dev_p->num_flow_director_fltr = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_flow_director_fltr = %d\n",
+ prefix,
+ dev_p->num_flow_director_fltr);
+ }
+ if (func_p) {
+ u32 reg_val, val;
+
+ reg_val = rd32(hw, GLQF_FD_SIZE);
+ val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+ GLQF_FD_SIZE_FD_GSIZE_S;
+ func_p->fd_fltr_guar =
+ ice_get_num_per_func(hw, val);
+ val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+ GLQF_FD_SIZE_FD_BSIZE_S;
+ func_p->fd_fltr_best_effort = val;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_guar = %d\n",
+ prefix, func_p->fd_fltr_guar);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: fd_fltr_best_effort = %d\n",
+ prefix, func_p->fd_fltr_best_effort);
+ }
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1887,10 +2005,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
-
- /* Prep values for flags, sah, sal */
- cmd->sah = htons(*((const u16 *)mac_addr));
- cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));
+ ether_addr_copy(cmd->mac_addr, mac_addr);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2117,6 +2232,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
if (!cfg)
return ICE_ERR_PARAM;
@@ -2145,7 +2261,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
- return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ status = 0;
+
+ return status;
}
/**
@@ -3089,12 +3209,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -3103,6 +3225,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* using the correct size so that we are correct regardless
* of the endianness of the machine.
*/
+ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+ ice_debug(hw, ICE_DBG_QCTX,
+ "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+ f, ce_info[f].width, ce_info[f].size_of);
+ continue;
+ }
switch (ce_info[f].size_of) {
case sizeof(u8):
ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8104f3d64d96..9b9e50d2398b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -70,7 +70,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -108,6 +109,8 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index dd946866d7b8..1e18021aa073 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -12,6 +12,7 @@ do { \
(qinfo)->sq.bal = prefix##_ATQBAL; \
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
(qinfo)->rq.head = prefix##_ARQH; \
(qinfo)->rq.tail = prefix##_ARQT; \
@@ -20,6 +21,7 @@ do { \
(qinfo)->rq.bal = prefix##_ARQBAL; \
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
@@ -199,7 +201,9 @@ unwind_alloc_rq_bufs:
cq->rq.r.rq_bi[i].pa = 0;
cq->rq.r.rq_bi[i].size = 0;
}
+ cq->rq.r.rq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -245,7 +249,9 @@ unwind_alloc_sq_bufs:
cq->sq.r.sq_bi[i].pa = 0;
cq->sq.r.sq_bi[i].size = 0;
}
+ cq->sq.r.sq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -304,6 +310,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return 0;
}
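+/* Free descriptor buffers, the command buffer list and the DMA head for one
+ * control queue ring. The buffer array pointer is checked first, so this is
+ * safe to invoke from init error paths where the ring may only be partially
+ * set up.
+ */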
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size, \
+ (qi)->ring.r.ring##_bi[i].va, \
+ (qi)->ring.r.ring##_bi[i].pa); \
+ (qi)->ring.r.ring##_bi[i].va = NULL;\
+ (qi)->ring.r.ring##_bi[i].pa = 0;\
+ (qi)->ring.r.ring##_bi[i].size = 0;\
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -357,6 +385,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -416,33 +445,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) { \
- dmam_free_coherent(ice_hw_to_dev(hw), \
- (qi)->ring.r.ring##_bi[i].size,\
- (qi)->ring.r.ring##_bi[i].va,\
- (qi)->ring.r.ring##_bi[i].pa);\
- (qi)->ring.r.ring##_bi[i].va = NULL; \
- (qi)->ring.r.ring##_bi[i].pa = 0; \
- (qi)->ring.r.ring##_bi[i].size = 0; \
- } \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free DMA head */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -635,6 +644,50 @@ init_ctrlq_free_sq:
}
/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
@@ -649,17 +702,27 @@ init_ctrlq_free_sq:
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status ret_code;
+ enum ice_status status;
+ u32 retry = 0;
/* Init FW admin queue */
- ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
- if (ret_code)
- return ret_code;
+ do {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
- ret_code = ice_init_check_adminq(hw);
- if (ret_code)
- return ret_code;
+ status = ice_init_check_adminq(hw);
+ if (status != ICE_ERR_AQ_FW_CRITICAL)
+ break;
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Retry Admin Queue init due to FW critical error\n");
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
+ } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
+ if (status)
+ return status;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -701,57 +764,12 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_shutdown_ctrlq - shutdown routine for any control queue
- * @hw: pointer to the hardware structure
- * @q_type: specific Control queue type
- *
- * NOTE: this function does not destroy the control queue locks.
- */
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
-{
- struct ice_ctl_q_info *cq;
-
- switch (q_type) {
- case ICE_CTL_Q_ADMIN:
- cq = &hw->adminq;
- if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
- break;
- case ICE_CTL_Q_MAILBOX:
- cq = &hw->mailboxq;
- break;
- default:
- return;
- }
-
- ice_shutdown_sq(hw, cq);
- ice_shutdown_rq(hw, cq);
-}
-
-/**
- * ice_shutdown_all_ctrlq - shutdown routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * NOTE: this function does not destroy the control queue locks. The driver
- * may call this at runtime to shutdown and later restart control queues, such
- * as in response to a reset event.
- */
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
-{
- /* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
- /* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
-}
-
-/**
* ice_destroy_ctrlq_locks - Destroy locks for a control queue
* @cq: pointer to the control queue
*
* Destroys the send and receive queue locks for a given control queue.
*/
-static void
-ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
mutex_destroy(&cq->sq_lock);
mutex_destroy(&cq->rq_lock);
@@ -1042,9 +1060,15 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* update the error if time out occurred */
if (!cmd_completed) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
+ rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
+ status = ICE_ERR_AQ_FW_CRITICAL;
+ } else {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
}
sq_send_command_error:
@@ -1128,7 +1152,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
memcpy(&e->desc, desc, sizeof(e->desc));
datalen = le16_to_cpu(desc->datalen);
- e->msg_len = min(datalen, e->buf_len);
+ e->msg_len = min_t(u16, datalen, e->buf_len);
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index bf0ebe6149e8..faaa08e8171b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -34,6 +34,8 @@ enum ice_ctl_q {
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
+#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
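+/* i.e. admin queue init is retried for up to ~10 * 100 ms = 1 second when the
+ * FW reports a critical error
+ */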
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
@@ -59,6 +61,7 @@ struct ice_ctl_q_ring {
u32 bal;
u32 len_mask;
u32 len_ena_mask;
+ u32 len_crit_mask;
u32 head_mask;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7bea09363b42..979af197f8a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_is_pfc_causing_hung_q
+ * @pf: pointer to PF structure
+ * @txqueue: Tx queue which is supposedly hung queue
+ *
+ * Find out if PFC is causing the hung queue; if so return true, else false.
+ */
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
+{
+ u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
+ u64 ref_prio_xoff[ICE_MAX_UP];
+ struct ice_vsi *vsi;
+ u32 up2tc;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ num_tcs++;
+
+	/* first find out the TC to which the hung queue belongs */
+ for (tc = 0; tc < num_tcs - 1; tc++)
+ if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
+ vsi->tc_cfg.tc_info[tc + 1].qoffset,
+ txqueue))
+ break;
+
+	/* Build a bitmap of all UPs associated with the suspect hung queue's
+	 * TC so that we can check its counters for an increment.
+	 */
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
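+	/* PRTDCB_TUP2TC packs one 3-bit TC index per user priority (UP) */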
+ for (i = 0; i < ICE_MAX_UP; i++) {
+ up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
+ if (up_mapped_tc == tc)
+ up_in_tc |= BIT(i);
+ }
+
+	/* Even though the hung queue belongs to a PFC enabled TC, the Tx
+	 * timeout can still be legitimate. To make sure it was really caused
+	 * by a PFC storm, check whether the priority XOFF counters are
+	 * incrementing.
+	 */
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
+
+ ice_update_dcb_stats(pf);
+
+ for (i = 0; i < ICE_MAX_UP; i++)
+ if (up_in_tc & BIT(i))
+ if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
+ return true;
+
+ return false;
+}
+
+/**
* ice_dcb_get_mode - gets the DCB mode
* @port_info: pointer to port info structure
* @host: if set it's HOST if not it's MANAGED
@@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
*/
static bool ice_dcb_tc_contig(u8 *prio_table)
{
- u8 max_tc = 0;
+ bool found_empty = false;
+ u8 used_tc = 0;
int i;
- for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
- u8 cur_tc = prio_table[i];
+ /* Create a bitmap of used TCs */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ used_tc |= BIT(prio_table[i]);
- if (cur_tc > max_tc)
- return false;
- else if (cur_tc == max_tc)
- max_tc++;
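+	/* TCs are contiguous when the used_tc bitmap has no holes, e.g.
+	 * priorities mapped to TCs {0, 1, 2} yield 0b0111 (contiguous) while
+	 * {0, 2} yield 0b0101 and fail at the hole left by TC 1
+	 */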
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+ if (used_tc & BIT(i)) {
+ if (found_empty)
+ return false;
+ } else {
+ found_empty = true;
+ }
}
return true;
@@ -728,39 +791,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
+ *
+ * This should not be called if the outer VLAN is software offloaded as the VLAN
+ * tag will already be configured with the correct ID and priority bits
*/
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
- return 0;
+ return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
first->tx_flags |= (skb->priority & 0x7) <<
ICE_TX_FLAGS_VLAN_PR_S;
- if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
- struct vlan_ethhdr *vhdr;
- int rc;
-
- rc = skb_cow_head(skb, 0);
- if (rc < 0)
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI = htons(first->tx_flags >>
- ICE_TX_FLAGS_VLAN_S);
- } else {
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- }
+		/* if this is not already set, it means a VLAN 0 + priority tag
+		 * needs to be offloaded
+		 */
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
-
- return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 37680e815b02..323238669572 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -17,6 +17,8 @@
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
@@ -25,13 +27,27 @@ void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
-int
+void
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+/**
+ * ice_find_q_in_range
+ * @low: start of queue range for a TC i.e. offset of TC
+ * @high: start of queue for next TC
+ * @tx_q: Tx queue index to check (e.g. the suspect hung queue)
+ *
+ * finds if queue 'tx_q' falls within the queue range [low, high) of a TC
+ */
+static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
+{
+ return (tx_q >= low) && (tx_q < high);
+}
+
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
@@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
+static inline bool
+ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
+ unsigned int __always_unused txqueue)
+{
+ return false;
+}
+
#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index c4c12414083a..87f91b750d59 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -7,8 +7,6 @@
#include "ice_dcb_nl.h"
#include <net/dcbnl.h>
-#define ICE_APP_PROT_ID_ROCE 0x8915
-
/**
* ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info
* @netdev: device associated with interface that needs reset
@@ -671,7 +669,7 @@ static bool
ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
struct ice_dcb_app_priority_table *app)
{
- int i;
+ unsigned int i;
for (i = 0; i < cfg->numapps; i++) {
if (app->selector == cfg->app[i].selector &&
@@ -746,7 +744,8 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *old_cfg, *new_cfg;
- int i, j, ret = 0;
+ unsigned int i, j;
+ int ret = 0;
if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return -EINVAL;
@@ -869,7 +868,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
struct ice_port_info *pi;
struct dcb_app sapp;
struct ice_pf *pf;
- int i;
+ unsigned int i;
if (!netdev)
return;
@@ -941,7 +940,7 @@ ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg)
{
struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
- int i;
+ unsigned int i;
if (!main_vsi)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index c6833944b90a..a73d06e06b5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ /* The netlist version fields are BCD formatted */
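+	/* %x prints the BCD digits directly, e.g. minor 0x10 renders as "10" */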
+ snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+ netlist->cust_ver);
+
+ return 0;
+}
+
+static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+ snprintf(buf, len, "0x%08x", netlist->hash);
+
+ return 0;
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
@@ -128,6 +149,8 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.netlist", ice_info_netlist_ver),
+ running("fw.netlist.build", ice_info_netlist_build),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 593fb37bd59e..68c38004a088 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_flow.h"
+#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -129,6 +130,8 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
+ ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
+ ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
};
static const u32 ice_regs_dump_list[] = {
@@ -139,9 +142,6 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
- PF0INT_ITR_0(0),
- PF0INT_ITR_1(0),
- PF0INT_ITR_2(0),
};
struct ice_priv_flag {
@@ -157,6 +157,8 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("vf-true-promisc-support",
+ ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -203,7 +205,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
struct ice_pf *pf = np->vsi->back;
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
- int i;
+ unsigned int i;
regs->version = 1;
@@ -273,8 +275,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -282,8 +285,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
false);
if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto release;
}
@@ -304,7 +308,7 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -332,7 +336,8 @@ static u64 ice_link_test(struct net_device *netdev)
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %d\n", status);
+ netdev_err(netdev, "link query error, status = %s\n",
+ ice_stat_str(status));
return 1;
}
@@ -373,7 +378,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
0x00000000, 0xFFFFFFFF
};
u32 val, orig_val;
- int i;
+ unsigned int i;
orig_val = rd32(hw, reg);
for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
@@ -426,7 +431,7 @@ static u64 ice_reg_test(struct net_device *netdev)
GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
{GLINT_CTL, 0xffff0001, 1, 0}
};
- int i;
+ unsigned int i;
netdev_dbg(netdev, "Register test\n");
for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
@@ -671,7 +676,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
- LIST_HEAD(tmp_list);
struct device *dev;
u8 *tx_frame;
int i;
@@ -707,16 +711,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
/* Test VSI needs to receive broadcast packets */
eth_broadcast_addr(broadcast);
- if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+ if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
ret = 5;
goto lbtest_mac_dis;
}
- if (ice_add_mac(&pf->hw, &tmp_list)) {
- ret = 6;
- goto free_mac_list;
- }
-
if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 7;
goto remove_mac_filters;
@@ -739,10 +738,8 @@ static u64 ice_loopback_test(struct net_device *netdev)
lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
- if (ice_remove_mac(&pf->hw, &tmp_list))
+ if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
-free_mac_list:
- ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -1158,8 +1155,9 @@ static int ice_nway_reset(struct net_device *netdev)
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(pi->hw->adminq.sq_last_status));
return -EIO;
}
@@ -1308,6 +1306,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_down(vsi);
ice_up(vsi);
}
+	/* don't allow this flag to be modified while even a single VF is
+	 * in promiscuous mode because that's not supported
+	 */
+ if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
+ ice_is_any_vf_in_promisc(pf)) {
+ dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
+ ret = -EAGAIN;
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2450,8 +2458,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EINVAL;
}
@@ -2526,6 +2534,10 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
struct ice_vsi *vsi = np->vsi;
switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return ice_add_fdir_ethtool(vsi, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return ice_del_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXFH:
return ice_set_rss_hash_opt(vsi, cmd);
default:
@@ -2549,12 +2561,27 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = -EOPNOTSUPP;
+ struct ice_hw *hw;
+
+ hw = &vsi->back->hw;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vsi->rss_size;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = hw->fdir_active_fltr;
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ice_get_ethtool_fdir_entry(hw, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
ice_get_rss_hash_opt(vsi, cmd);
ret = 0;
@@ -2593,7 +2620,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
- u32 new_rx_cnt, new_tx_cnt;
+ u16 new_rx_cnt, new_tx_cnt;
if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
@@ -2645,8 +2672,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
if (ice_is_xdp_ena_vsi(vsi))
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->count = new_tx_cnt;
- vsi->num_tx_desc = new_tx_cnt;
- vsi->num_rx_desc = new_rx_cnt;
+ vsi->num_tx_desc = (u16)new_tx_cnt;
+ vsi->num_rx_desc = (u16)new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2952,31 +2979,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
status = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
- if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back. If still
- * down, restart autoneg link or reinitialize the interface.
- */
- msleep(75);
- if (!test_bit(__ICE_DOWN, pf->state))
- return ice_nway_reset(netdev);
-
- ice_down(vsi);
- ice_up(vsi);
- }
-
return err;
}
@@ -3171,10 +3189,6 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- /* check to see if VSI is active */
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
-
/* report maximum channels */
ch->max_rx = ice_get_max_rxq(pf);
ch->max_tx = ice_get_max_txq(pf);
@@ -3184,6 +3198,10 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
ch->combined_count = ice_get_combined_cnt(vsi);
ch->rx_count = vsi->num_rxq - ch->combined_count;
ch->tx_count = vsi->num_txq - ch->combined_count;
+
+ /* report other queues */
+ ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+ ch->max_other = ch->other_count;
}
/**
@@ -3227,8 +3245,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
vsi->rss_table_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
err = -EIO;
}
@@ -3255,9 +3274,14 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
/* do not support changing other_count */
- if (ch->other_count)
+ if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
+ netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
+ return -EOPNOTSUPP;
+ }
+
curr_combined = ice_get_combined_cnt(vsi);
/* these checks are for cases where user didn't specify a particular
@@ -3731,10 +3755,10 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_hw *hw = &pf->hw;
enum ice_status status;
bool is_sfp = false;
+ unsigned int i;
u16 offset = 0;
u8 value = 0;
u8 page = 0;
- int i;
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0,
&value, 1, 0, NULL);
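
[Editor's note] The ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL cases added above are reached from userspace through the SIOCETHTOOL ioctl, i.e. the `ethtool -N`/`ethtool -U` (insert) and `ethtool -N <dev> delete <loc>` (delete) commands. As a minimal sketch only — the interface name, rule location and target queue below are hypothetical — a userspace program inserting a TCP/IPv4 rule that steers port-80 traffic to Rx queue 2 could look like this:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;	/* handled by ice_add_fdir_ethtool() */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* match dst port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xFFFF);	/* full mask = exact match on this field */
	nfc.fs.ring_cookie = 2;		/* steer to Rx queue 2 */
	nfc.fs.location = 10;		/* rule ID, hypothetical */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical netdev */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr))
		perror("SIOCETHTOOL");
	return 0;
}

The equivalent CLI would be roughly `ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 10`; `ethtool -N eth0 delete 10` then exercises the ETHTOOL_SRXCLSRLDEL path, and `ethtool -n eth0` reads the rules back through the new ETHTOOL_GRXCLSRL* cases.
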
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
new file mode 100644
index 000000000000..d7430ce6af26
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+/* flow director ethtool support for ice */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_flow.h"
+
+static struct in6_addr full_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+static struct in6_addr zero_ipv6_addr_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }
+ }
+};
+
+/* calls to ice_flow_add_prof require the number of segments in the array
+ * for segs_cnt. In this code that is one more than the index.
+ */
+#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
+
+/**
+ * ice_fltr_to_ethtool_flow - convert an ice filter type to an ethtool flow type
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
+{
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ case IPV4_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ case IPV6_USER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ default:
+ return ICE_FLTR_PTYPE_NONF_NONE;
+ }
+}
+
+/**
+ * ice_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool ice_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
+ * @hw: hardware structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 on success and -EINVAL on failure
+ */
+int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *rule;
+ int ret = 0;
+ u16 idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);
+
+ if (!rule || fsp->location != rule->fltr_id) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
+ break;
+ case IPV6_USER_FLOW:
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
+ rule->mask.v6.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
+ rule->mask.v6.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
+ break;
+ default:
+ break;
+ }
+
+ if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+ idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
+ if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
+ dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
+ rule->flow_type);
+ ret = -EINVAL;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @hw: hardware structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+int
+ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct ice_fdir_fltr *f_rule;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ /* report total rule count */
+ cmd->data = ice_get_fdir_cnt_all(hw);
+
+ mutex_lock(&hw->fdir_fltr_lock);
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = f_rule->fltr_id;
+ cnt++;
+ }
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+ return val;
+}
+
+/**
+ * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to look up
+ */
+static struct ice_fd_hw_prof *
+ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ if (blk == ICE_BLK_FD && hw->fdir_prof)
+ return hw->fdir_prof[flow];
+
+ return NULL;
+}
+
+/**
+ * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow: FDir flow type to release
+ */
+static void
+ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
+{
+ struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ int tun;
+
+ if (!prof)
+ return;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+ int j;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ for (j = 0; j < prof->cnt; j++) {
+ u16 vsi_num;
+
+ if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
+ continue;
+ vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
+ prof->entry_h[j][tun] = 0;
+ }
+ ice_flow_rem_prof(hw, blk, prof_id);
+ }
+}
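
[Editor's note] The prof_id arithmetic above gives each (flow type, segment kind) pair a unique profile ID: non-tunnel profiles occupy [0, ICE_FLTR_PTYPE_MAX) and tunnel profiles the next block up. A standalone sketch of the mapping, with an illustrative stand-in for the enum size (not the driver's actual value):

#include <stdio.h>

#define PTYPE_MAX	9	/* illustrative stand-in for ICE_FLTR_PTYPE_MAX */
#define SEG_MAX		2	/* non-tunnel + tunnel, as in ICE_FD_HW_SEG_MAX */

static unsigned long long prof_id(int flow, int tun)
{
	return flow + (unsigned long long)tun * PTYPE_MAX;
}

int main(void)
{
	int flow, tun;

	for (tun = 0; tun < SEG_MAX; tun++)
		for (flow = 0; flow < PTYPE_MAX; flow++)
			printf("flow %d, %s -> prof_id %llu\n", flow,
			       tun ? "tunnel" : "non-tunnel",
			       prof_id(flow, tun));
	return 0;
}
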
+
+/**
+ * ice_fdir_rem_flow - release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @blk: hardware block
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
+ enum ice_fltr_ptype flow_type)
+{
+ int flow = (int)flow_type & ~FLOW_EXT;
+ struct ice_fd_hw_prof *prof;
+ int tun, i;
+
+ prof = ice_fdir_get_hw_prof(hw, blk, flow);
+ if (!prof)
+ return;
+
+ ice_fdir_erase_flow_from_hw(hw, blk, flow);
+ for (i = 0; i < prof->cnt; i++)
+ prof->vsi_h[i] = 0;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ if (!prof->fdir_seg[tun])
+ continue;
+ devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
+ prof->fdir_seg[tun] = NULL;
+ }
+ prof->cnt = 0;
+}
+
+/**
+ * ice_fdir_release_flows - release all flows in use for later replay
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_release_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ /* release Flow Director HW table entries */
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
+ ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
+}
+
+/**
+ * ice_fdir_replay_flows - replay HW Flow Director filter info
+ * @hw: pointer to HW instance
+ */
+void ice_fdir_replay_flows(struct ice_hw *hw)
+{
+ int flow;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ int tun;
+
+ if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
+ continue;
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ struct ice_flow_prof *hw_prof;
+ struct ice_fd_hw_prof *prof;
+ u64 prof_id;
+ int j;
+
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ prof->fdir_seg[tun], TNL_SEG_CNT(tun),
+ &hw_prof);
+ for (j = 0; j < prof->cnt; j++) {
+ enum ice_flow_priority prio;
+ u64 entry_h = 0;
+ int err;
+
+ prio = ICE_FLOW_PRIO_NORMAL;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof_id,
+ prof->vsi_h[0],
+ prof->vsi_h[j],
+ prio, prof->fdir_seg,
+ &entry_h);
+ if (err) {
+ dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
+ flow);
+ continue;
+ }
+ prof->entry_h[j][tun] = entry_h;
+ }
+ }
+ }
+}
+
+/**
+ * ice_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @data: pointer to userdef data structure for storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *data)
+{
+ u64 value, mask;
+
+ memset(data, 0, sizeof(*data));
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
+ mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
+ if (!mask)
+ return 0;
+
+#define ICE_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0)
+#define ICE_USERDEF_FLEX_OFFS_S 16
+#define ICE_USERDEF_FLEX_OFFS_M GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
+#define ICE_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0)
+
+ /* 0x1fe is the maximum value for offsets stored in the internal
+ * filtering tables.
+ */
+#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
+
+ if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
+ value > ICE_USERDEF_FLEX_FLTR_M)
+ return -EINVAL;
+
+ data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
+ data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
+ ICE_USERDEF_FLEX_OFFS_S;
+ if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+
+ data->flex_fltr = true;
+
+ return 0;
+}
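
[Editor's note] For reference, the 64-bit user-def word parsed above carries a 16-bit flex match word in bits 15:0 and its packet offset in bits 31:16; the mask over those low 32 bits must be fully set, and the offset may not exceed 0x1fe. A standalone sketch of how such a value could be built (the word and offset are made-up illustration values):

#include <stdint.h>
#include <stdio.h>

/* mirror the layout consumed by ice_parse_rx_flow_user_data() */
#define FLEX_WORD_SHIFT	0
#define FLEX_OFFS_SHIFT	16

static uint64_t build_userdef(uint16_t word, uint16_t offset)
{
	return ((uint64_t)offset << FLEX_OFFS_SHIFT) |
	       ((uint64_t)word << FLEX_WORD_SHIFT);
}

int main(void)
{
	/* match flex word 0xABCD at hypothetical offset 0x50 */
	printf("user-def 0x%llx\n",
	       (unsigned long long)build_userdef(0xABCD, 0x50));
	return 0;
}

This value would be handed to the kernel through the ethtool user-def extension field (FLOW_EXT) of the flow spec.
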
+
+/**
+ * ice_fdir_num_avail_fltr - return the number of unused flow director filters
+ * @hw: pointer to hardware structure
+ * @vsi: software VSI structure
+ *
+ * There are two filter pools: guaranteed and best effort (shared). Each VSI
+ * can use filters from either pool. The guaranteed pool is divided between
+ * VSIs. The best effort filter pool is common to all VSIs and is a device
+ * shared resource pool. The number of filters available to this VSI is the
+ * sum of the VSI's guaranteed filter pool and the globally available best
+ * effort filter pool.
+ *
+ * Returns the number of available flow director filters to this VSI
+ */
+static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+{
+ u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ u16 num_guar;
+ u16 num_be;
+
+ /* total guaranteed filters assigned to this VSI */
+ num_guar = vsi->num_gfltr;
+
+ /* minus the guaranteed filters programmed by this VSI */
+ num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
+ VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
+
+ /* total global best effort filters */
+ num_be = hw->func_caps.fd_fltr_best_effort;
+
+ /* minus the global best effort filters programmed */
+ num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
+ GLQF_FD_CNT_FD_BCNT_S;
+
+ return num_guar + num_be;
+}
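
[Editor's note] As a concrete reading of this arithmetic: with, say, 64 guaranteed filters assigned to the VSI of which 10 are programmed, and 2048 best-effort filters device-wide of which 500 are in use, the VSI could still add (64 - 10) + (2048 - 500) = 1602 filters. A trivial sketch with those hypothetical counts:

#include <stdio.h>

int main(void)
{
	/* hypothetical counts mirroring ice_fdir_num_avail_fltr() */
	unsigned int guar_total = 64, guar_used = 10;	/* per-VSI pool */
	unsigned int be_total = 2048, be_used = 500;	/* device-wide pool */

	printf("filters available to this VSI: %u\n",
	       (guar_total - guar_used) + (be_total - be_used));
	return 0;
}
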
+
+/**
+ * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
+ * @hw: HW structure containing the FDir flow profile structure(s)
+ * @flow: flow type to allocate the flow profile for
+ *
+ * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
+ * on success and negative on error.
+ */
+static int
+ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
+{
+ if (!hw)
+ return -EINVAL;
+
+ if (!hw->fdir_prof) {
+ hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
+ ICE_FLTR_PTYPE_MAX,
+ sizeof(*hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof)
+ return -ENOMEM;
+ }
+
+ if (!hw->fdir_prof[flow]) {
+ hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(**hw->fdir_prof),
+ GFP_KERNEL);
+ if (!hw->fdir_prof[flow])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
+ * @pf: pointer to the PF structure
+ * @seg: protocol header description pointer
+ * @flow: filter enum
+ * @tun: FDir segment to program
+ */
+static int
+ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
+ enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi, *ctrl_vsi;
+ struct ice_flow_seg_info *old_seg;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_fd_hw_prof *hw_prof;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u64 entry1_h = 0;
+ u64 entry2_h = 0;
+ u64 prof_id;
+ int err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EINVAL;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ err = ice_fdir_alloc_flow_prof(hw, flow);
+ if (err)
+ return err;
+
+ hw_prof = hw->fdir_prof[flow];
+ old_seg = hw_prof->fdir_seg[tun];
+ if (old_seg) {
+ /* This flow_type already has a changed input set.
+ * If it matches the requested input set then we are
+ * done. Or, if it's different then it's an error.
+ */
+ if (!memcmp(old_seg, seg, sizeof(*seg)))
+ return -EEXIST;
+
+ /* if there are FDir filters using this flow,
+ * then return error.
+ */
+ if (hw->fdir_fltr_cnt[flow]) {
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+ return -EINVAL;
+ }
+
+ if (ice_is_arfs_using_perfect_flow(hw, flow)) {
+ dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
+ flow);
+ return -EINVAL;
+ }
+
+ /* remove HW filter definition */
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+ }
+
+ /* Add a profile for this flow. The segment count comes from
+ * TNL_SEG_CNT(tun): one segment for the non-tunnel case, two when
+ * the inner headers of a tunnel are matched as well.
+ */
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (status)
+ return ice_status_to_errno(status);
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_prof;
+ }
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_entry;
+ }
+
+ hw_prof->fdir_seg[tun] = seg;
+ hw_prof->entry_h[0][tun] = entry1_h;
+ hw_prof->entry_h[1][tun] = entry2_h;
+ hw_prof->vsi_h[0] = main_vsi->idx;
+ hw_prof->vsi_h[1] = ctrl_vsi->idx;
+ if (!hw_prof->cnt)
+ hw_prof->cnt = 2;
+
+ return 0;
+
+err_entry:
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
+err_prof:
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
+
+ return err;
+}
+
+/**
+ * ice_set_init_fdir_seg - set a default (initial) flow segment
+ * @seg: flow segment for programming
+ * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
+ * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
+ *
+ * Set the configuration for perfect filters to the provided flow segment for
+ * programming the HW filter. This is to be called only when initializing
+ * filters, as this function assumes no filters exist.
+ */
+static int
+ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
+ enum ice_flow_seg_hdr l3_proto,
+ enum ice_flow_seg_hdr l4_proto)
+{
+ enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
+
+ if (!seg)
+ return -EINVAL;
+
+ if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
+ } else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
+ src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
+ dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
+ } else {
+ return -EINVAL;
+ }
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
+
+ /* IP source address */
+ ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* IP destination address */
+ ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 source port */
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ /* Layer 4 destination port */
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
+
+ return 0;
+}
+
+/**
+ * ice_create_init_fdir_rule - create a default FDir rule for a flow type
+ * @pf: PF structure
+ * @flow: filter enum
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* if there is already a filter rule for this kind, return -EINVAL */
+ if (hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->fdir_seg[0])
+ return -EINVAL;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_SEG_HDR_UDP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_TCP);
+ else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_SEG_HDR_UDP);
+ else
+ ret = -EINVAL;
+ if (ret)
+ goto err_exit;
+
+ /* add filter for outer headers */
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
+ if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
+ if (ret)
+ /* could not write tunnel filter, but outer header filter
+ * exists
+ */
+ devm_kfree(dev, tun_seg);
+
+ set_bit(flow, hw->fdir_perfect_fltr);
+ return ret;
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_ip4_seg - set an IPv4/L4 flow segment from ethtool mask data
+ * @seg: flow segment for programming
+ * @tcp_ip4_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv4
+ */
+static int
+ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip4_spec *tcp_ip4_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
+ !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ /* filtering on TOS not supported */
+ if (tcp_ip4_spec->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);
+
+ /* IP source address */
+ if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!tcp_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip4_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip4_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip4_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
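
[Editor's note] Each field above must be either fully masked (exact match) or all-zero (wildcard); any partial mask is rejected with -EOPNOTSUPP, and a single wildcard field demotes the rule from "perfect" to best-effort. A small helper sketch restating that three-way classification (not driver code):

#include <stdint.h>

enum fld_match { FLD_EXACT, FLD_WILDCARD, FLD_UNSUPPORTED };

/* classify a 32-bit ethtool mask the way ice_set_fdir_ip4_seg() does */
static enum fld_match classify_mask32(uint32_t mask)
{
	if (mask == 0xFFFFFFFF)
		return FLD_EXACT;	/* field takes part in a perfect match */
	if (mask == 0)
		return FLD_WILDCARD;	/* field ignored; filter is not perfect */
	return FLD_UNSUPPORTED;		/* partial masks -> -EOPNOTSUPP */
}

The same per-field rule applies in the IPv6 and "user" variants below, using memcmp() against the full/zero IPv6 masks defined at the top of the file.
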
+
+/**
+ * ice_set_fdir_ip4_usr_seg - set an IPv4 "user" flow segment from ethtool data
+ * @seg: flow segment for programming
+ * @usr_ip4_spec: ethtool IPv4 user flow spec mask data
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv4
+ */
+static int
+ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip4_spec *usr_ip4_spec,
+ bool *perfect_fltr)
+{
+ /* first 4 bytes of Layer 4 header */
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EINVAL;
+ if (usr_ip4_spec->tos)
+ return -EINVAL;
+ if (usr_ip4_spec->ip_ver)
+ return -EINVAL;
+ /* Filtering on Layer 4 protocol not supported */
+ if (usr_ip4_spec->proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+
+ /* IP source address */
+ if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4src)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* IP destination address */
+ if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!usr_ip4_spec->ip4dst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_seg - set an IPv6/L4 flow segment from ethtool mask data
+ * @seg: flow segment for programming
+ * @tcp_ip6_spec: mask data from ethtool
+ * @l4_proto: Layer 4 protocol to program
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the mask data into the flow segment to be used to program HW
+ * table based on provided L4 protocol for IPv6
+ */
+static int
+ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_tcpip6_spec *tcp_ip6_spec,
+ enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
+{
+ enum ice_flow_field src_port, dst_port;
+
+ /* make sure we don't have any empty rule */
+ if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
+ return -EINVAL;
+
+ /* filtering on TC not supported */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
+ src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
+ src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
+ } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
+ src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
+ dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
+ } else {
+ return -EINVAL;
+ }
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
+
+ if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->psrc)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ /* Layer 4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ false);
+ else if (!tcp_ip6_spec->pdst)
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_set_fdir_ip6_usr_seg - set an IPv6 "user" flow segment from ethtool data
+ * @seg: flow segment for programming
+ * @usr_ip6_spec: ethtool IPv6 user flow spec mask data
+ * @perfect_fltr: only valid on success; returns true if perfect filter,
+ * false if not
+ *
+ * Set the offset data into the flow segment to be used to program HW
+ * table for IPv6
+ */
+static int
+ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_usrip6_spec *usr_ip6_spec,
+ bool *perfect_fltr)
+{
+ /* filtering on Layer 4 bytes not supported */
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+ /* filtering on TC not supported */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ /* filtering on Layer 4 protocol not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EOPNOTSUPP;
+ /* empty rules are not valid */
+ if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)) &&
+ !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ return -EINVAL;
+
+ *perfect_fltr = true;
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
+
+ if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
+ sizeof(struct in6_addr)))
+ *perfect_fltr = false;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
+ * @pf: PF structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @user: user defined data from flow specification
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
+ struct ice_rx_flow_userdef *user)
+{
+ struct ice_flow_seg_info *seg, *tun_seg;
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_fltr_ptype fltr_idx;
+ struct ice_hw *hw = &pf->hw;
+ bool perfect_filter;
+ int ret;
+
+ seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
+ if (!seg)
+ return -ENOMEM;
+
+ tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ GFP_KERNEL);
+ if (!tun_seg) {
+ devm_kfree(dev, seg);
+ return -ENOMEM;
+ }
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V4_FLOW:
+ ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV4_USER_FLOW:
+ ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
+ &perfect_filter);
+ break;
+ case TCP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_TCP,
+ &perfect_filter);
+ break;
+ case UDP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_UDP,
+ &perfect_filter);
+ break;
+ case SCTP_V6_FLOW:
+ ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
+ ICE_FLOW_SEG_HDR_SCTP,
+ &perfect_filter);
+ break;
+ case IPV6_USER_FLOW:
+ ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
+ &perfect_filter);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto err_exit;
+
+ /* tunnel segments are shifted up one: the inner headers go in tun_seg[1] */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+
+ if (user && user->flex_fltr) {
+ perfect_filter = false;
+ ice_flow_add_fld_raw(seg, user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
+ ICE_FLTR_PRGM_FLEX_WORD_SIZE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL);
+ }
+
+ /* add filter for outer headers */
+ fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
+ ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
+ ICE_FD_HW_SEG_NON_TUN);
+ if (ret == -EEXIST)
+ /* Rule already exists, free memory and continue */
+ devm_kfree(dev, seg);
+ else if (ret)
+ /* could not write filter, free memory */
+ goto err_exit;
+
+ /* make tunneled filter HW entries if possible */
+ memcpy(&tun_seg[1], seg, sizeof(*seg));
+ ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
+ ICE_FD_HW_SEG_TUN);
+ if (ret == -EEXIST) {
+ /* Rule already exists, free memory and count as success */
+ devm_kfree(dev, tun_seg);
+ ret = 0;
+ } else if (ret) {
+ /* could not write tunnel filter, but outer filter exists */
+ devm_kfree(dev, tun_seg);
+ }
+
+ if (perfect_filter)
+ set_bit(fltr_idx, hw->fdir_perfect_fltr);
+ else
+ clear_bit(fltr_idx, hw->fdir_perfect_fltr);
+
+ return ret;
+
+err_exit:
+ devm_kfree(dev, tun_seg);
+ devm_kfree(dev, seg);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_fdir_write_fltr - send a flow director filter to the hardware
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds the filter, false removes it
+ * @is_tun: true programs the inner (tunnel) headers, false the outer headers
+ *
+ * returns 0 on success and negative value on error
+ */
+int
+ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ bool is_tun)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fltr_desc desc;
+ struct ice_vsi *ctrl_vsi;
+ enum ice_status status;
+ u8 *pkt, *frag_pkt;
+ bool has_frag;
+ int err;
+
+ ctrl_vsi = ice_get_ctrl_vsi(pf);
+ if (!ctrl_vsi)
+ return -EINVAL;
+
+ pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+ frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
+ if (!frag_pkt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_free_all;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
+ if (err)
+ goto err_free_all;
+
+ /* repeat for fragment packet */
+ has_frag = ice_fdir_has_frag(input->flow_type);
+ if (has_frag) {
+ /* does not return error */
+ ice_fdir_get_prgm_desc(hw, input, &desc, add);
+ status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (status) {
+ err = ice_status_to_errno(status);
+ goto err_frag;
+ }
+ err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
+ if (err)
+ goto err_frag;
+ } else {
+ devm_kfree(dev, frag_pkt);
+ }
+
+ return 0;
+
+err_free_all:
+ devm_kfree(dev, frag_pkt);
+err_free:
+ devm_kfree(dev, pkt);
+ return err;
+
+err_frag:
+ devm_kfree(dev, frag_pkt);
+ return err;
+}
+
+/**
+ * ice_fdir_write_all_fltr - write a flow director filter for all tunnel types
+ * @pf: PF data structure
+ * @input: filter structure
+ * @add: true adds the filter, false removes it
+ *
+ * returns 0 on success and negative value on error
+ */
+static int
+ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ bool add)
+{
+ u16 port_num;
+ int tun;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ bool is_tun = tun == ICE_FD_HW_SEG_TUN;
+ int err;
+
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
+ &port_num))
+ continue;
+ err = ice_fdir_write_fltr(pf, input, add, is_tun);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * ice_fdir_replay_fltrs - replay filters from the HW filter list
+ * @pf: board private structure
+ */
+void ice_fdir_replay_fltrs(struct ice_pf *pf)
+{
+ struct ice_fdir_fltr *f_rule;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
+ int err = ice_fdir_write_all_fltr(pf, f_rule, true);
+
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
+ err, f_rule->fltr_id);
+ }
+}
+
+/**
+ * ice_fdir_create_dflt_rules - create default perfect filters
+ * @pf: PF data structure
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int ice_fdir_create_dflt_rules(struct ice_pf *pf)
+{
+ int err;
+
+ /* Create perfect TCP and UDP rules in hardware. */
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
+ if (err)
+ return err;
+
+ err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_fdir - turn on/off flow director
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_fltr_ptype flow;
+
+ if (ena) {
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ ice_fdir_create_dflt_rules(pf);
+ return;
+ }
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
+ goto release_lock;
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+ /* ignore return value */
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), f_rule);
+ }
+
+ if (hw->fdir_prof)
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
+ flow++)
+ if (hw->fdir_prof[flow])
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
+
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+}
+
+/**
+ * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
+ * @pf: PF structure
+ * @flow_type: FDir flow type to release
+ */
+static void
+ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool need_perfect = false;
+
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ need_perfect = true;
+
+ if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
+ return;
+
+ ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
+ if (need_perfect)
+ ice_create_init_fdir_rule(pf, flow_type);
+}
+
+/**
+ * ice_fdir_update_list_entry - add or delete a filter from the filter list
+ * @pf: PF structure
+ * @input: filter structure
+ * @fltr_idx: ethtool index of filter to modify
+ *
+ * returns 0 on success and negative on errors
+ */
+static int
+ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
+ int fltr_idx)
+{
+ struct ice_fdir_fltr *old_fltr;
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOENT;
+
+ /* Do not update filters during reset */
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+
+ old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
+ if (old_fltr) {
+ err = ice_fdir_write_all_fltr(pf, old_fltr, false);
+ if (err)
+ return err;
+ ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
+ /* we just deleted the last filter of flow_type so we
+ * should also delete the HW filter info.
+ */
+ ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
+ list_del(&old_fltr->fltr_node);
+ devm_kfree(ice_hw_to_dev(hw), old_fltr);
+ }
+ if (!input)
+ return err;
+ ice_fdir_list_add_fltr(hw, input);
+ ice_fdir_update_cntrs(hw, input->flow_type, true);
+ return 0;
+}
+
+/**
+ * ice_del_fdir_ethtool - delete Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int val;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not delete filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(__ICE_FD_FLUSH_REQ, pf->state))
+ return -EBUSY;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
+ mutex_unlock(&hw->fdir_fltr_lock);
+
+ return val;
+}
+
+/**
+ * ice_set_fdir_input_set - Set the input set for Flow Director
+ * @vsi: pointer to target VSI
+ * @fsp: pointer to ethtool Rx flow specification
+ * @input: filter structure
+ */
+static int
+ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
+ struct ice_fdir_fltr *input)
+{
+ u16 dest_vsi, q_index = 0;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int flow_type;
+ u8 dest_ctl;
+
+ if (!vsi || !fsp || !input)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+
+ dest_vsi = vsi->idx;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (vf) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
+ return -EINVAL;
+ }
+
+ if (ring >= vsi->num_rxq)
+ return -EINVAL;
+
+ dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ q_index = ring;
+ }
+
+ input->fltr_id = fsp->location;
+ input->q_index = q_index;
+ flow_type = fsp->flow_type & ~FLOW_EXT;
+
+ input->dest_vsi = dest_vsi;
+ input->dest_ctl = dest_ctl;
+ input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
+ input->flow_type = ice_ethtool_flow_to_fltr(flow_type);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(input->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(input->ext_data.usr_def));
+ input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
+ input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
+ memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(input->ext_mask.usr_def));
+ input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
+ input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
+ }
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ break;
+ case IPV4_USER_FLOW:
+ input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
+ input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
+ input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
+ input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
+ input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
+ input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
+ input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
+ input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_fdir_ethtool - Add/Remove Flow Director filter
+ * @vsi: pointer to target VSI
+ * @cmd: command to add or delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
+{
+ struct ice_rx_flow_userdef userdata;
+ struct ethtool_rx_flow_spec *fsp;
+ struct ice_fdir_fltr *input;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int fltrs_needed;
+ u16 tunnel_port;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ /* Do not program filters during reset */
+ if (ice_is_reset_in_progress(pf->state)) {
+ dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
+ return -EBUSY;
+ }
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (ice_parse_rx_flow_user_data(fsp, &userdata))
+ return -EINVAL;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
+ if (ret)
+ return ret;
+
+ if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ /* return error if not an update and no available filters */
+ fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
+ 2 : 1;
+ if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
+ ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
+ return -ENOSPC;
+ }
+
+ input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ ret = ice_set_fdir_input_set(vsi, fsp, input);
+ if (ret)
+ goto free_input;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ if (ice_fdir_is_dup_fltr(hw, input)) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ if (userdata.flex_fltr) {
+ input->flex_fltr = true;
+ input->flex_word = cpu_to_be16(userdata.flex_word);
+ input->flex_offset = userdata.flex_offset;
+ }
+
+ /* input struct is added to the HW filter list */
+ ice_fdir_update_list_entry(pf, input, fsp->location);
+
+ ret = ice_fdir_write_all_fltr(pf, input, true);
+ if (ret)
+ goto remove_sw_rule;
+
+ goto release_lock;
+
+remove_sw_rule:
+ ice_fdir_update_cntrs(hw, input->flow_type, false);
+ list_del(&input->fltr_node);
+release_lock:
+ mutex_unlock(&hw->fdir_fltr_lock);
+free_input:
+ if (ret)
+ devm_kfree(dev, input);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
new file mode 100644
index 000000000000..6834df14332f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice_common.h"
+
+/* These are training packet headers used to program flow director filters. */
+static const u8 ice_fdir_tcpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00
+};
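
[Editor's note] These templates are bare Ethernet frames with zeroed addresses, ports and checksums: in ice_fdir_tcpv4_pkt, bytes 0-13 are the Ethernet header (EtherType 0x0800), bytes 14-33 the IPv4 header (IHL 5, TTL 0x40, protocol 6), and bytes 34 onward the TCP header (data offset 5, window 0x2000). The driver patches match values into a copy of the template elsewhere; as a hedged sketch of the standard offsets involved (assuming no VLAN tag, as in these arrays):

#include <string.h>
#include <stdint.h>

#define ETH_HDR_LEN	14	/* no VLAN tag in the templates */
#define IP4_HDR_LEN	20	/* IHL is 5 in the templates */

/* write L3/L4 match values (network byte order) into a TCP/IPv4 template */
static void patch_tcp4_template(uint8_t *pkt, uint32_t src_ip_be,
				uint32_t dst_ip_be, uint16_t dst_port_be)
{
	uint8_t *ip = pkt + ETH_HDR_LEN;
	uint8_t *tcp = ip + IP4_HDR_LEN;

	memcpy(ip + 12, &src_ip_be, 4);		/* IPv4 source address */
	memcpy(ip + 16, &dst_ip_be, 4);		/* IPv4 destination address */
	memcpy(tcp + 2, &dst_port_be, 2);	/* TCP destination port */
}
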
+
+static const u8 ice_fdir_udpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x20, 0x00, 0x00, 0x40, 0x00, 0x40, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00
+};
+
+static const u8 ice_fdir_tcpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x06, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0x00, 0x20, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctpv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x0C, 0x84, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ipv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3B, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x52, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x20, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip4_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x46, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_tcp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x6e, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x00, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_udp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x62, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_sctp6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x66, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x84, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 ice_fdir_ip6_tun_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x5a, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* Flow Director no-op training packet table */
+static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
+ sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ sizeof(ice_fdir_udpv4_pkt), ice_fdir_udpv4_pkt,
+ sizeof(ice_fdir_udp4_tun_pkt), ice_fdir_udp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ sizeof(ice_fdir_sctpv4_pkt), ice_fdir_sctpv4_pkt,
+ sizeof(ice_fdir_sctp4_tun_pkt), ice_fdir_sctp4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt,
+ sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
+ sizeof(ice_fdir_tcp6_tun_pkt), ice_fdir_tcp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ sizeof(ice_fdir_udpv6_pkt), ice_fdir_udpv6_pkt,
+ sizeof(ice_fdir_udp6_tun_pkt), ice_fdir_udp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ sizeof(ice_fdir_sctpv6_pkt), ice_fdir_sctpv6_pkt,
+ sizeof(ice_fdir_sctp6_tun_pkt), ice_fdir_sctp6_tun_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ sizeof(ice_fdir_ipv6_pkt), ice_fdir_ipv6_pkt,
+ sizeof(ice_fdir_ip6_tun_pkt), ice_fdir_ip6_tun_pkt,
+ },
+};
+
+#define ICE_FDIR_NUM_PKT ARRAY_SIZE(ice_fdir_pkt)
+
+/**
+ * ice_set_dflt_val_fd_desc
+ * @fd_fltr_ctx: pointer to fd filter descriptor
+ */
+static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)
+{
+ fd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;
+ fd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE;
+ fd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX;
+ fd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1;
+ fd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT;
+ fd_fltr_ctx->drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE;
+ fd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0;
+ fd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0;
+ fd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG;
+ fd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO;
+ fd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO;
+ fd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET;
+ fd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
+ fd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD;
+ fd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO;
+}
+
+/**
+ * ice_set_fd_desc_val
+ * @ctx: pointer to fd filter descriptor context
+ * @fdir_desc: populated with fd filter descriptor values
+ */
+static void
+ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
+ struct ice_fltr_desc *fdir_desc)
+{
+ u64 qword;
+
+ /* prep QW0 of FD filter programming desc */
+ qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
+ ICE_FXD_FLTR_QW0_QINDEX_M;
+ qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
+ ICE_FXD_FLTR_QW0_COMP_Q_M;
+ qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
+ ICE_FXD_FLTR_QW0_COMP_REPORT_M;
+ qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
+ ICE_FXD_FLTR_QW0_FD_SPACE_M;
+ qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
+ ICE_FXD_FLTR_QW0_STAT_CNT_M;
+ qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
+ ICE_FXD_FLTR_QW0_STAT_ENA_M;
+ qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
+ ICE_FXD_FLTR_QW0_EVICT_ENA_M;
+ qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_M;
+ qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
+ ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
+ qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
+ ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
+ qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
+ ICE_FXD_FLTR_QW0_DROP_M;
+ qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
+ ICE_FXD_FLTR_QW0_FLEX_PRI_M;
+ qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
+ ICE_FXD_FLTR_QW0_FLEX_MDID_M;
+ qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
+ ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
+
+ /* prep QW1 of FD filter programming desc */
+ qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
+ ICE_FXD_FLTR_QW1_DTYPE_M;
+ qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
+ ICE_FXD_FLTR_QW1_PCMD_M;
+ qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
+ ICE_FXD_FLTR_QW1_PROF_PRI_M;
+ qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
+ ICE_FXD_FLTR_QW1_PROF_M;
+ qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
+ ICE_FXD_FLTR_QW1_FD_VSI_M;
+ qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
+ ICE_FXD_FLTR_QW1_SWAP_M;
+ qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
+ ICE_FXD_FLTR_QW1_FDID_PRI_M;
+ qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_MDID_M;
+ qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
+ ICE_FXD_FLTR_QW1_FDID_M;
+ fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
+}
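+
+/* Every field above follows the same shift-and-mask packing pattern. As an
+ * illustrative sketch (the field name is hypothetical), a 3-bit field FOO
+ * starting at bit 11 of QW0 would be packed as:
+ *
+ *	qword |= ((u64)ctx->foo << ICE_FXD_FLTR_QW0_FOO_S) &
+ *		 ICE_FXD_FLTR_QW0_FOO_M;
+ *
+ * where the _S macro is the shift (11) and the _M macro is the mask
+ * (0x7ULL << 11). The u64 cast keeps fields destined for the upper 32 bits
+ * from being truncated before the shift.
+ */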
+
+/**
+ * ice_fdir_get_prgm_desc - set a fdir descriptor from a fdir filter struct
+ * @hw: pointer to the hardware structure
+ * @input: filter
+ * @fdesc: filter descriptor
+ * @add: if add is true, this is an add operation, false implies delete
+ */
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add)
+{
+ struct ice_fd_fltr_desc_ctx fdir_fltr_ctx = { 0 };
+
+ /* set default context info */
+ ice_set_dflt_val_fd_desc(&fdir_fltr_ctx);
+
+ /* change sideband filtering values */
+ fdir_fltr_ctx.fdid = input->fltr_id;
+ if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
+ fdir_fltr_ctx.qindex = 0;
+ } else {
+ fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
+ fdir_fltr_ctx.qindex = input->q_index;
+ }
+ fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ fdir_fltr_ctx.cnt_index = input->cnt_index;
+ fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
+ fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
+ fdir_fltr_ctx.toq_prio = 3;
+ fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
+ ICE_FXD_FLTR_QW1_PCMD_REMOVE;
+ fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
+ fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
+ fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
+ fdir_fltr_ctx.fdid_prio = 3;
+ fdir_fltr_ctx.desc_prof = 1;
+ fdir_fltr_ctx.desc_prof_prio = 3;
+ ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
+}
+
+/**
+ * ice_alloc_fd_res_cntr - obtain counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ */
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_free_fd_res_cntr - Free counter resource for FD type
+ * @hw: pointer to the hardware structure
+ * @cntr_id: counter index to be freed
+ */
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+{
+ return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
+}
+
+/**
+ * ice_alloc_fd_guar_item - allocate resource for FD guaranteed entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_alloc_fd_shrd_item - allocate resource for flow director shared entries
+ * @hw: pointer to the hardware structure
+ * @cntr_id: returns counter index
+ * @num_fltr: number of filter entries to be allocated
+ */
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
+ cntr_id);
+}
+
+/**
+ * ice_get_fdir_cnt_all - get the number of Flow Director filters
+ * @hw: hardware data structure
+ *
+ * Returns the number of filters available on the device
+ */
+int ice_get_fdir_cnt_all(struct ice_hw *hw)
+{
+ return hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort;
+}
+
+/**
+ * ice_pkt_insert_ipv6_addr - insert an IPv6 address into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @addr: IPv6 address to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
+{
+ int idx;
+
+ for (idx = 0; idx < ICE_IPV6_ADDR_LEN_AS_U32; idx++)
+ memcpy(pkt + offset + idx * sizeof(*addr), &addr[idx],
+ sizeof(*addr));
+}
+
+/**
+ * ice_pkt_insert_u16 - insert a be16 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 16 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u16(u8 *pkt, int offset, __be16 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_pkt_insert_u32 - insert a be32 value into a memory buffer
+ * @pkt: packet buffer
+ * @offset: offset into buffer
+ * @data: 32 bit value to convert and insert into pkt at offset
+ */
+static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
+{
+ memcpy(pkt + offset, &data, sizeof(data));
+}
+
+/**
+ * ice_fdir_get_gen_prgm_pkt - generate a training packet
+ * @hw: pointer to the hardware structure
+ * @input: flow director filter data structure
+ * @pkt: pointer to return filter packet
+ * @frag: generate a fragment packet
+ * @tun: true implies generate a tunnel packet
+ */
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun)
+{
+ enum ice_fltr_ptype flow;
+ u16 tnl_port;
+ u8 *loc;
+ u16 idx;
+
+ if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ switch (input->ip.v4.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ switch (input->ip.v6.proto) {
+ case IPPROTO_TCP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+ break;
+ case IPPROTO_UDP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+ break;
+ case IPPROTO_SCTP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+ break;
+ case IPPROTO_IP:
+ flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ } else {
+ flow = input->flow_type;
+ }
+
+ for (idx = 0; idx < ICE_FDIR_NUM_PKT; idx++)
+ if (ice_fdir_pkt[idx].flow == flow)
+ break;
+ if (idx == ICE_FDIR_NUM_PKT)
+ return ICE_ERR_PARAM;
+ if (!tun) {
+ memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
+ loc = pkt;
+ } else {
+ if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ return ICE_ERR_DOES_NOT_EXIST;
+ if (!ice_fdir_pkt[idx].tun_pkt)
+ return ICE_ERR_PARAM;
+ memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
+ ice_fdir_pkt[idx].tun_pkt_len);
+ ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ htons(tnl_port));
+ loc = &pkt[ICE_FDIR_TUN_PKT_OFF];
+ }
+
+ /* Reverse the src and dst, since the HW expects them to be from the Tx
+ * perspective. The user input is given from the Rx filter perspective.
+ */
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ if (frag)
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ if (input->flex_fltr)
+ ice_pkt_insert_u16(loc, input->flex_offset, input->flex_word);
+
+ return 0;
+}
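+
+/* Illustrative usage (the call site shown is hypothetical): zero a buffer of
+ * ICE_FDIR_MAX_RAW_PKT_SIZE bytes, then call
+ *
+ *	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, false);
+ *
+ * On success, pkt holds a training packet with the filter's tuple inserted,
+ * ready to be posted on the FDIR programming ring together with the
+ * descriptor built by ice_fdir_get_prgm_desc().
+ */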
+
+/**
+ * ice_fdir_has_frag - does flow type have 2 ptypes
+ * @flow: flow ptype
+ *
+ * Returns true if there is a fragment packet for this ptype
+ */
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
+{
+ if (flow == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ice_fdir_find_fltr_by_idx - find filter with idx
+ * @hw: pointer to hardware structure
+ * @fltr_idx: index to find
+ *
+ * Returns pointer to filter if found or NULL
+ */
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx)
+{
+ struct ice_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found in the list */
+ if (fltr_idx == rule->fltr_id)
+ return rule;
+ if (fltr_idx < rule->fltr_id)
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * ice_fdir_list_add_fltr - add a new node to the flow director filter list
+ * @hw: hardware structure
+ * @fltr: filter node to add to structure
+ */
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *fltr)
+{
+ struct ice_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ /* rule ID found or passed its spot in the list */
+ if (rule->fltr_id >= fltr->fltr_id)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->fltr_node, &parent->fltr_node);
+ else
+ list_add(&fltr->fltr_node, &hw->fdir_list_head);
+}
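+
+/* The list is kept sorted by ascending fltr_id. For example, inserting ID 5
+ * into [1, 3, 8] stops at 8, records 3 as the parent, and links the new node
+ * after it, yielding [1, 3, 5, 8]. ice_fdir_find_fltr_by_idx() relies on
+ * this ordering to stop early once it passes the requested index.
+ */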
+
+/**
+ * ice_fdir_update_cntrs - increment / decrement filter counter
+ * @hw: pointer to hardware structure
+ * @flow: filter flow type
+ * @add: true implies filters added
+ */
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add)
+{
+ int incr;
+
+ incr = add ? 1 : -1;
+ hw->fdir_active_fltr += incr;
+
+ if (flow == ICE_FLTR_PTYPE_NONF_NONE || flow >= ICE_FLTR_PTYPE_MAX)
+ ice_debug(hw, ICE_DBG_SW, "Unknown filter type %d\n", flow);
+ else
+ hw->fdir_fltr_cnt[flow] += incr;
+}
+
+/**
+ * ice_cmp_ipv6_addr - compare 2 IP v6 addresses
+ * @a: IP v6 address
+ * @b: IP v6 address
+ *
+ * Returns 0 on equal, returns non-0 if different
+ */
+static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
+{
+ return memcmp(a, b, 4 * sizeof(__be32));
+}
+
+/**
+ * ice_fdir_comp_rules - compare 2 filters
+ * @a: a Flow Director filter data structure
+ * @b: a Flow Director filter data structure
+ * @v6: bool true if v6 filter
+ *
+ * Returns true if the filters match
+ */
+static bool
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+{
+ enum ice_fltr_ptype flow_type = a->flow_type;
+
+ /* The calling function already checks that the two filters have the
+ * same flow_type.
+ */
+ if (!v6) {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ }
+ } else {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * ice_fdir_is_dup_fltr - test if filter is already in list for PF
+ * @hw: hardware data structure
+ * @input: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
+{
+ struct ice_fdir_fltr *rule;
+ bool ret = false;
+
+ list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
+ enum ice_fltr_ptype flow_type;
+
+ if (rule->flow_type != input->flow_type)
+ continue;
+
+ flow_type = input->flow_type;
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ ret = ice_fdir_comp_rules(rule, input, false);
+ else
+ ret = ice_fdir_comp_rules(rule, input, true);
+ if (ret) {
+ if (rule->fltr_id == input->fltr_id &&
+ rule->q_index != input->q_index)
+ ret = false;
+ else
+ break;
+ }
+ }
+
+ return ret;
+}
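+
+/* Note the fltr_id check above: a rule that matches the same tuple but
+ * carries the same filter ID with a different queue index is treated as an
+ * update of the existing rule rather than as a duplicate.
+ */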
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
new file mode 100644
index 000000000000..1c587766daab
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FDIR_H_
+#define _ICE_FDIR_H_
+
+#define ICE_FDIR_TUN_PKT_OFF 50
+#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
+
+/* macros for offsets into packets for flow director programming */
+#define ICE_IPV4_SRC_ADDR_OFFSET 26
+#define ICE_IPV4_DST_ADDR_OFFSET 30
+#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_TCP_DST_PORT_OFFSET 36
+#define ICE_IPV4_UDP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_UDP_DST_PORT_OFFSET 36
+#define ICE_IPV4_SCTP_SRC_PORT_OFFSET 34
+#define ICE_IPV4_SCTP_DST_PORT_OFFSET 36
+#define ICE_IPV4_PROTO_OFFSET 23
+#define ICE_IPV6_SRC_ADDR_OFFSET 22
+#define ICE_IPV6_DST_ADDR_OFFSET 38
+#define ICE_IPV6_TCP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_TCP_DST_PORT_OFFSET 56
+#define ICE_IPV6_UDP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
+#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
+#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
+/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
+ * requests that the packet not be fragmented. MF indicates that a packet has
+ * been fragmented.
+ */
+#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+
+enum ice_fltr_prgm_desc_dest {
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
+};
+
+enum ice_fltr_prgm_desc_fd_status {
+ ICE_FLTR_PRGM_DESC_FD_STATUS_NONE,
+ ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID,
+};
+
+/* Flow Director (FD) Filter Programming descriptor */
+struct ice_fd_fltr_desc_ctx {
+ u32 fdid;
+ u16 qindex;
+ u16 cnt_index;
+ u16 fd_vsi;
+ u16 flex_val;
+ u8 comp_q;
+ u8 comp_report;
+ u8 fd_space;
+ u8 cnt_ena;
+ u8 evict_ena;
+ u8 toq;
+ u8 toq_prio;
+ u8 dpu_recipe;
+ u8 drop;
+ u8 flex_prio;
+ u8 flex_mdid;
+ u8 dtype;
+ u8 pcmd;
+ u8 desc_prof_prio;
+ u8 desc_prof;
+ u8 swap;
+ u8 fdid_prio;
+ u8 fdid_mdid;
+};
+
+#define ICE_FLTR_PRGM_FLEX_WORD_SIZE sizeof(__be16)
+
+struct ice_rx_flow_userdef {
+ u16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+};
+
+struct ice_fdir_v4 {
+ __be32 dst_ip;
+ __be32 src_ip;
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header;
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tos;
+ u8 ip_ver;
+ u8 proto;
+};
+
+#define ICE_IPV6_ADDR_LEN_AS_U32 4
+
+struct ice_fdir_v6 {
+ __be32 dst_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be32 src_ip[ICE_IPV6_ADDR_LEN_AS_U32];
+ __be16 dst_port;
+ __be16 src_port;
+ __be32 l4_header; /* next header */
+ __be32 sec_parm_idx; /* security parameter index */
+ u8 tc;
+ u8 proto;
+};
+
+struct ice_fdir_extra {
+ u8 dst_mac[ETH_ALEN]; /* dest MAC address */
+ u32 usr_def[2]; /* user data */
+ __be16 vlan_type; /* VLAN ethertype */
+ __be16 vlan_tag; /* VLAN tag info */
+};
+
+struct ice_fdir_fltr {
+ struct list_head fltr_node;
+ enum ice_fltr_ptype flow_type;
+
+ union {
+ struct ice_fdir_v4 v4;
+ struct ice_fdir_v6 v6;
+ } ip, mask;
+
+ struct ice_fdir_extra ext_data;
+ struct ice_fdir_extra ext_mask;
+
+ /* flex byte filter data */
+ __be16 flex_word;
+ u16 flex_offset;
+ u16 flex_fltr;
+
+ /* filter control */
+ u16 q_index;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fltr_status;
+ u16 cnt_index;
+ u32 fltr_id;
+};
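+
+/* Illustrative example (not taken from the driver): a filter matching
+ * TCP/IPv4 packets with destination port 80 exactly would set
+ * flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP, ip.v4.dst_port = htons(80) and
+ * mask.v4.dst_port = htons(0xFFFF), leaving the unmasked fields zeroed.
+ */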
+
+/* Dummy packet filter definition structure */
+struct ice_fdir_base_pkt {
+ enum ice_fltr_ptype flow;
+ u16 pkt_len;
+ const u8 *pkt;
+ u16 tun_pkt_len;
+ const u8 *tun_pkt;
+};
+
+enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+enum ice_status
+ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+enum ice_status
+ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+void
+ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ struct ice_fltr_desc *fdesc, bool add);
+enum ice_status
+ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
+ u8 *pkt, bool frag, bool tun);
+int ice_get_fdir_cnt_all(struct ice_hw *hw);
+bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
+struct ice_fdir_fltr *
+ice_fdir_find_fltr_by_idx(struct ice_hw *hw, u32 fltr_idx);
+void
+ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow, bool add);
+void ice_fdir_list_add_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
+#endif /* _ICE_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 42bac3ec5526..4420fc02f7e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -5,6 +5,15 @@
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type
+ * in the ice segment. The first call is made with the ice_seg parameter
+ * non-NULL; on subsequent calls, ice_seg is set to NULL to continue the
+ * enumeration. When the function returns a NULL pointer, the end of the
+ * entries has been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
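+
+/* Typical enumeration loop (illustrative; compare ice_find_boost_entry()
+ * below for a real caller):
+ *
+ *	memset(&state, 0, sizeof(state));
+ *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL, handler);
+ *	while (entry) {
+ *		(process entry)
+ *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
+ *					   handler);
+ *	}
+ */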
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for a matching label prefix before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
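+
+/* For example, with hw->pf_id == 2 the label "TNL_VXLAN_PF2" matches the
+ * "TNL_VXLAN_PF" prefix and label_name[len] == '2', so a TNL_VXLAN tunnel
+ * entry is recorded with that label's value as its boost TCAM address.
+ */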
+
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -593,8 +864,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
u32 i;
ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
- pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
/* Search all package segments for the requested segment type */
for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
@@ -764,13 +1036,15 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
- ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
- ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
ice_buf_tbl = ice_find_buf_table(ice_seg);
@@ -815,14 +1089,16 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
- hw->ice_pkg_ver = seg_hdr->seg_ver;
- memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
sizeof(hw->ice_pkg_name));
- ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
- seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
- seg_hdr->seg_name);
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT,
"Did not find ice segment in driver package\n");
@@ -863,9 +1139,11 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
if (pkg_info->pkg_info[i].is_active) {
flags[place++] = 'A';
hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
memcpy(hw->active_pkg_name,
pkg_info->pkg_info[i].name,
- sizeof(hw->active_pkg_name));
+ sizeof(pkg_info->pkg_info[i].name));
hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
}
if (pkg_info->pkg_info[i].is_active_at_boot)
@@ -905,10 +1183,10 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
if (len < sizeof(*pkg))
return ICE_ERR_BUF_TOO_SHORT;
- if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
return ICE_ERR_CFG;
/* pkg must have at least one segment */
@@ -990,6 +1268,68 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
}
/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with the driver
+ * and NVM
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return status;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return status;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1039,18 +1379,12 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_version(&hw->pkg_ver);
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
if (status)
return status;
- /* find segment in given package */
- seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
- if (!seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* download package */
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
ice_debug(hw, ICE_DBG_INIT,
@@ -1292,6 +1626,284 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
+/**
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ mutex_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port)
+{
+ bool res = false;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
+ }
+
+ mutex_unlock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
+ *
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = 0;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+
+ /* over-write the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
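+
+/* Illustrative call (the trigger shown is hypothetical): when the stack
+ * reports a new VXLAN UDP port, a caller would issue
+ *
+ *	status = ice_create_tunnel(hw, TNL_VXLAN, port);
+ *
+ * Creating a port that is already in use only increments its reference
+ * count, so calls to ice_create_tunnel() and ice_destroy_tunnel() must stay
+ * balanced.
+ */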
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ mutex_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = 0;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam));
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ mutex_unlock(&hw->tnl_lock);
+
+ return status;
+}
+
/* PTG Management */
/**
@@ -1807,9 +2419,16 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
- u16 off, i;
+ u16 off;
+ u8 i;
- for (i = 0; i < es->count; i++) {
+ /* For FD, we don't want to reuse an existing profile with the same
+ * field vector and mask. Doing so would cause rule interference.
+ */
+ if (blk == ICE_BLK_FD)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < (u8)es->count; i++) {
off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
@@ -1830,6 +2449,9 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
break;
@@ -1847,6 +2469,9 @@ static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
break;
@@ -2290,6 +2915,12 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
mutex_lock(&hw->fl_profs_locks[blk_idx]);
list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ list_for_each_entry_safe(e, t, &p->entries, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
list_del(&p->l_entry);
devm_kfree(ice_hw_to_dev(hw), p);
}
@@ -2919,6 +3550,212 @@ error_tmp:
}
/**
+ * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @mask_sel: mask select
+ *
+ * This function enables any of the masks selected by the mask select parameter
+ * for the profile specified.
+ */
+static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
+{
+ wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
+
+ ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
+ GLQF_FDMASK_SEL(prof_id), mask_sel);
+}
+
+struct ice_fd_src_dst_pair {
+ u8 prot_id;
+ u8 count;
+ u16 off;
+};
+
+static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
+ /* These are defined in pairs */
+ { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
+ { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
+
+ { ICE_PROT_IPV4_IL, 2, 12 },
+ { ICE_PROT_IPV4_IL, 2, 16 },
+
+ { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
+ { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
+
+ { ICE_PROT_IPV6_IL, 8, 8 },
+ { ICE_PROT_IPV6_IL, 8, 24 },
+
+ { ICE_PROT_TCP_IL, 1, 0 },
+ { ICE_PROT_TCP_IL, 1, 2 },
+
+ { ICE_PROT_UDP_OF, 1, 0 },
+ { ICE_PROT_UDP_OF, 1, 2 },
+
+ { ICE_PROT_UDP_IL_OR_S, 1, 0 },
+ { ICE_PROT_UDP_IL_OR_S, 1, 2 },
+
+ { ICE_PROT_SCTP_IL, 1, 0 },
+ { ICE_PROT_SCTP_IL, 1, 2 }
+};
+
+#define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
+
+/**
+ * ice_update_fd_swap - set register appropriately for a FD FV extraction
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ * @es: extraction sequence (length of array is determined by the block)
+ */
+static enum ice_status
+ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
+{
+ DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+ u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
+#define ICE_FD_FV_NOT_FOUND (-2)
+ s8 first_free = ICE_FD_FV_NOT_FOUND;
+ u8 used[ICE_MAX_FV_WORDS] = { 0 };
+ s8 orig_free, si;
+ u32 mask_sel = 0;
+ u8 i, j, k;
+
+ bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
+
+ /* This code assumes that the Flow Director field vectors are assigned
+ * from the end of the FV indexes working towards the zero index, that
+ * only complete fields will be included and will be consecutive, and
+ * that there are no gaps between valid indexes.
+ */
+
+ /* Determine swap fields present */
+ for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
+ /* Find the first free entry, assuming right to left population.
+ * This is where we can start adding additional pairs if needed.
+ */
+ if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
+ ICE_PROT_INVALID)
+ first_free = i - 1;
+
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
+ es[i].off == ice_fd_pairs[j].off) {
+ set_bit(j, pair_list);
+ pair_start[j] = i;
+ }
+ }
+
+ orig_free = first_free;
+
+ /* determine missing swap fields that need to be added */
+ for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
+ u8 bit1 = test_bit(i + 1, pair_list);
+ u8 bit0 = test_bit(i, pair_list);
+
+ if (bit0 ^ bit1) {
+ u8 index;
+
+ /* add the appropriate 'paired' entry */
+ if (!bit0)
+ index = i;
+ else
+ index = i + 1;
+
+ /* check for room */
+ if (first_free + 1 < (s8)ice_fd_pairs[index].count)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* place in extraction sequence */
+ for (k = 0; k < ice_fd_pairs[index].count; k++) {
+ es[first_free - k].prot_id =
+ ice_fd_pairs[index].prot_id;
+ es[first_free - k].off =
+ ice_fd_pairs[index].off + (k * 2);
+
+ if (k > first_free)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* keep track of non-relevant fields */
+ mask_sel |= BIT(first_free - k);
+ }
+
+ pair_start[index] = first_free;
+ first_free -= ice_fd_pairs[index].count;
+ }
+ }
+
+ /* fill in the swap array */
+ si = hw->blk[ICE_BLK_FD].es.fvw - 1;
+ while (si >= 0) {
+ u8 indexes_used = 1;
+
+ /* assume flat at this index */
+#define ICE_SWAP_VALID 0x80
+ used[si] = si | ICE_SWAP_VALID;
+
+ if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
+ si -= indexes_used;
+ continue;
+ }
+
+ /* check for a swap location */
+ for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
+ if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
+ es[si].off == ice_fd_pairs[j].off) {
+ u8 idx;
+
+ /* determine the appropriate matching field */
+ idx = j + ((j % 2) ? -1 : 1);
+
+ indexes_used = ice_fd_pairs[idx].count;
+ for (k = 0; k < indexes_used; k++) {
+ used[si - k] = (pair_start[idx] - k) |
+ ICE_SWAP_VALID;
+ }
+
+ break;
+ }
+
+ si -= indexes_used;
+ }
+
+ /* for each set of 4 swap and 4 inset indexes, write the appropriate
+ * register
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
+ u32 raw_swap = 0;
+ u32 raw_in = 0;
+
+ for (k = 0; k < 4; k++) {
+ u8 idx;
+
+ idx = (j * 4) + k;
+ if (used[idx] && !(mask_sel & BIT(idx))) {
+ raw_swap |= used[idx] << (k * BITS_PER_BYTE);
+#define ICE_INSET_DFLT 0x9f
+ raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
+ }
+ }
+
+ /* write the appropriate swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
+
+ /* write the appropriate inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
+ prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
+ }
+
+ /* initially clear the mask select for this profile */
+ ice_update_fd_mask(hw, prof_id, 0);
+
+ return 0;
+}
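
The loop at the end of ice_update_fd_swap() packs four one-byte entries of used[] into each 32-bit GLQF_FDSWAP register and mirrors every populated byte with the default inset value in GLQF_FDINSET. A minimal standalone sketch of that packing, reusing the ICE_SWAP_VALID and ICE_INSET_DFLT values defined in the function (the identity mapping below is an invented example, not driver output):

#include <assert.h>
#include <stdint.h>

#define ICE_SWAP_VALID 0x80u
#define ICE_INSET_DFLT 0x9fu

/* Pack four per-word swap indexes into one 32-bit register value */
static uint32_t pack_swap_reg(const uint8_t used[4])
{
	uint32_t raw = 0;
	int k;

	for (k = 0; k < 4; k++)
		raw |= (uint32_t)used[k] << (k * 8);
	return raw;
}

/* Every populated swap byte gets the default inset byte */
static uint32_t pack_inset_reg(const uint8_t used[4])
{
	uint32_t raw = 0;
	int k;

	for (k = 0; k < 4; k++)
		if (used[k])
			raw |= (uint32_t)ICE_INSET_DFLT << (k * 8);
	return raw;
}

int main(void)
{
	/* identity mapping for FV words 0..3, each flagged valid */
	const uint8_t used[4] = { 0x80, 0x81, 0x82, 0x83 };

	assert(pack_swap_reg(used) == 0x83828180u);
	assert(pack_inset_reg(used) == 0x9f9f9f9fu);
	return 0;
}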
+
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2939,7 +3776,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u32 byte = 0;
+ u8 byte = 0;
u8 prof_id;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -2953,6 +3790,18 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
+ if (blk == ICE_BLK_FD) {
+ /* For Flow Director block, the extraction sequence may
+ * need to be altered in the case where there are paired
+ * fields that have no match. This is necessary because
+ * for Flow Director, src and dest fields need to be paired
+ * for filter programming and these values are swapped
+ * during Tx.
+ */
+ status = ice_update_fd_swap(hw, prof_id, es);
+ if (status)
+ goto err_ice_add_prof;
+ }
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
@@ -2962,8 +3811,10 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
- if (!prof)
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof;
+ }
prof->profile_cookie = id;
prof->prof_id = prof_id;
@@ -2972,7 +3823,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u32 bit;
+ u8 bit;
if (!ptypes[byte]) {
bytes--;
@@ -3006,7 +3857,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
break;
/* nothing left in byte, then exit */
- m = ~((1 << (bit + 1)) - 1);
+ m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
break;
}
@@ -3703,8 +4554,10 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].prof_id,
t->tcam[i].ptg, vsig, 0, 0,
vl_msk, dc_msk, nm_msk);
- if (status)
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
+ }
/* log change */
list_add(&p->list_entry, chg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index c7b5e1a6ea2b..568ea519af51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,14 @@
#define ICE_PKG_CNT 4
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port);
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0fb3fe3ff3ea..a6f391eac8ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -20,7 +20,7 @@ struct ice_fv {
/* Package and segment headers and tables */
struct ice_pkg_hdr {
- struct ice_pkg_ver format_ver;
+ struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[1];
};
@@ -30,9 +30,9 @@ struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
- struct ice_pkg_ver seg_ver;
+ struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
- char seg_name[ICE_PKG_NAME_SIZE];
+ char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
@@ -75,7 +75,7 @@ struct ice_buf_table {
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
- __le32 track_id;
+ __le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
@@ -149,6 +149,7 @@ struct ice_buf_hdr {
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -291,6 +292,38 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+ TNL_VXLAN = 0,
+ TNL_GENEVE,
+ TNL_LAST = 0xFF,
+ TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+ enum ice_tunnel_type type;
+ const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+ enum ice_tunnel_type type;
+ u16 boost_addr;
+ u16 port;
+ u16 ref;
+ struct ice_boost_tcam_entry *boost_entry;
+ u8 valid;
+ u8 in_use;
+ u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES 16
+
+struct ice_tunnel_table {
+ struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+ u16 count;
+};
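
A short standalone sketch of a port lookup over this table, modeled on what ice_tunnel_port_in_use() (declared in ice_flex_pipe.h below) is expected to do; the trimmed-down types and the helper name find_tunnel_idx are illustrative only:

#include <stdbool.h>
#include <stdint.h>

#define TUNNEL_MAX_ENTRIES 16

struct tnl_entry {
	uint16_t port;
	uint8_t valid;
	uint8_t in_use;
};

struct tnl_table {
	struct tnl_entry tbl[TUNNEL_MAX_ENTRIES];
	uint16_t count;
};

/* Return true, and the slot index, when `port` is already in use */
static bool find_tunnel_idx(const struct tnl_table *t, uint16_t port,
			    uint16_t *index)
{
	uint16_t i;

	for (i = 0; i < t->count && i < TUNNEL_MAX_ENTRIES; i++)
		if (t->tbl[i].valid && t->tbl[i].in_use &&
		    t->tbl[i].port == port) {
			*index = i;
			return true;
		}
	return false;
}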
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 3de862a3c789..d74e5290677f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
-
+ /* GRE */
+ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
+ sizeof_field(struct gre_full_hdr, key)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+ 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+ 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
@@ -178,6 +193,40 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
return 0;
}
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_TCP 20
+#define ICE_FLOW_PROT_HDR_SZ_UDP 8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+ u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
+
+ /* L3 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+
+ /* L4 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+ sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+ sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+ return sz;
+}
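
Worked example: a segment declaring IPv4 and UDP headers yields 14 + 20 + 8 = 42 bytes. A standalone model of the same accounting (the HDR_* flag values are stand-ins, not the driver's ICE_FLOW_SEG_HDR_* constants):

#include <assert.h>
#include <stdint.h>

#define HDR_IPV4 0x04u
#define HDR_IPV6 0x08u
#define HDR_TCP  0x40u
#define HDR_UDP  0x80u
#define HDR_SCTP 0x100u

static uint16_t seg_sz(uint32_t hdrs)
{
	uint16_t sz = 14;		/* MAC, always present */

	if (hdrs & HDR_IPV4)		/* one L3 header at most */
		sz += 20;
	else if (hdrs & HDR_IPV6)
		sz += 40;

	if (hdrs & HDR_TCP)		/* one L4 header at most */
		sz += 20;
	else if (hdrs & HDR_UDP)
		sz += 8;
	else if (hdrs & HDR_SCTP)
		sz += 12;
	return sz;
}

int main(void)
{
	assert(seg_sz(HDR_IPV4 | HDR_UDP) == 42);	/* 14 + 20 + 8 */
	assert(seg_sz(HDR_IPV6 | HDR_TCP) == 74);	/* 14 + 40 + 20 */
	return 0;
}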
+
/**
* ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
* @params: information about the flow to be processed
@@ -225,6 +274,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+ if (!i) {
+ src = (const unsigned long *)ice_ptypes_gre_of;
+ bitmap_and(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ }
}
}
@@ -275,6 +330,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
+ case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+ prot_id = ICE_PROT_GRE_OF;
+ break;
default:
return ICE_ERR_NOT_IMPL;
}
@@ -324,6 +382,81 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
}
/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg)
+{
+ u16 fv_words;
+ u16 hdrs_sz;
+ u8 i;
+
+ if (!params->prof->segs[seg].raws_cnt)
+ return 0;
+
+ if (params->prof->segs[seg].raws_cnt >
+ ARRAY_SIZE(params->prof->segs[seg].raws))
+ return ICE_ERR_MAX_LIMIT;
+
+ /* Offsets within the segment headers are not supported */
+ hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+ if (!hdrs_sz)
+ return ICE_ERR_PARAM;
+
+ fv_words = hw->blk[params->blk].es.fvw;
+
+ for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+ struct ice_flow_seg_fld_raw *raw;
+ u16 off, cnt, j;
+
+ raw = &params->prof->segs[seg].raws[i];
+
+ /* Storing extraction information */
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
+ BITS_PER_BYTE;
+ raw->info.xtrct.idx = params->es_cnt;
+
+ /* Determine the number of field vector entries this raw field
+ * consumes.
+ */
+ cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
+ (raw->info.src.last * BITS_PER_BYTE),
+ (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
+ off = raw->info.xtrct.off;
+ for (j = 0; j < cnt; j++) {
+ u16 idx;
+
+ /* Make sure the number of extraction sequence entries required
+ * does not exceed the block's capability
+ */
+ if (params->es_cnt >= hw->blk[params->blk].es.count ||
+ params->es_cnt >= ICE_MAX_FV_WORDS)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+ }
+
+ return 0;
+}
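
The word accounting above is easiest to see with numbers: a 4-byte raw match at segment offset 3 starts extraction at byte 2 (disp = 8 bits) and needs DIV_ROUND_UP(8 + 32, 16) = 3 field-vector words. A standalone sketch of that arithmetic:

#include <assert.h>

#define FV_EXTRACT_SZ 2				/* bytes per FV word */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Model of the FV-word accounting in ice_flow_xtract_raws(): a raw
 * match at byte `off` of length `len` starts at a 2-byte-aligned
 * extraction offset and may need an extra word on each end. */
static unsigned int raw_fv_words(unsigned int off, unsigned int len,
				 unsigned int *xtrct_off)
{
	unsigned int disp = (off % FV_EXTRACT_SZ) * 8;	/* bit displacement */

	*xtrct_off = (off / FV_EXTRACT_SZ) * FV_EXTRACT_SZ;
	return DIV_ROUND_UP(disp + len * 8, FV_EXTRACT_SZ * 8);
}

int main(void)
{
	unsigned int start;

	/* 4 raw bytes at segment offset 3: extraction starts at byte 2
	 * and needs 3 words (bytes 2..7) to cover bytes 3..6 */
	assert(raw_fv_words(3, 4, &start) == 3 && start == 2);
	return 0;
}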
+
+/**
* ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
@@ -349,6 +482,11 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
if (status)
return status;
}
+
+ /* Process raw matching bytes */
+ status = ice_flow_xtract_raws(hw, params, i);
+ if (status)
+ return status;
}
return status;
@@ -373,10 +511,8 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
return status;
switch (params->blk) {
+ case ICE_BLK_FD:
case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */
status = 0;
break;
default:
@@ -458,6 +594,43 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
}
/**
+ * ice_dealloc_flow_entry - Deallocate flow entry memory
+ * @hw: pointer to the HW struct
+ * @entry: flow entry to be removed
+ */
+static void
+ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->entry)
+ devm_kfree(ice_hw_to_dev(hw), entry->entry);
+
+ devm_kfree(ice_hw_to_dev(hw), entry);
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
+ struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return ICE_ERR_BAD_PTR;
+
+ list_del(&entry->l_entry);
+
+ ice_dealloc_flow_entry(hw, entry);
+
+ return 0;
+}
+
+/**
* ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
* @hw: pointer to the HW struct
* @blk: classification stage
@@ -544,6 +717,21 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
{
enum ice_status status;
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ status = ice_flow_rem_entry_sync(hw, blk, e);
+ if (status)
+ break;
+ }
+
+ mutex_unlock(&prof->entries_lock);
+ }
+
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
@@ -629,7 +817,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-static enum ice_status
+enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
@@ -667,7 +855,7 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-static enum ice_status
+enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
@@ -691,6 +879,113 @@ out:
}
/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+ void *data, u64 *entry_h)
+{
+ struct ice_flow_entry *e = NULL;
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ /* No flow entry data is expected for RSS */
+ if (!entry_h || (!data && blk != ICE_BLK_RSS))
+ return ICE_ERR_BAD_PTR;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ mutex_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ } else {
+ /* Allocate memory for the entry being added and associate
+ * the VSI to the found flow profile
+ */
+ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
+ if (!e)
+ status = ICE_ERR_NO_MEMORY;
+ else
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ }
+
+ mutex_unlock(&hw->fl_profs_locks[blk]);
+ if (status)
+ goto out;
+
+ e->id = entry_id;
+ e->vsi_handle = vsi_handle;
+ e->prof = prof;
+ e->priority = prio;
+
+ switch (blk) {
+ case ICE_BLK_FD:
+ case ICE_BLK_RSS:
+ break;
+ default:
+ status = ICE_ERR_NOT_IMPL;
+ goto out;
+ }
+
+ mutex_lock(&prof->entries_lock);
+ list_add(&e->l_entry, &prof->entries);
+ mutex_unlock(&prof->entries_lock);
+
+ *entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+ if (status && e) {
+ if (e->entry)
+ devm_kfree(ice_hw_to_dev(hw), e->entry);
+ devm_kfree(ice_hw_to_dev(hw), e);
+ }
+
+ return status;
+}
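
A hedged kernel-context sketch of the two entry calls used together: the profile ID, entry ID and match buffer are placeholders, and ICE_FLOW_PRIO_NORMAL is assumed to be one of the ice_flow_priority enumerators:

static enum ice_status example_fd_entry(struct ice_hw *hw, u16 vsi_handle,
					void *match_buf)
{
	enum ice_status status;
	u64 entry_h;

	status = ice_flow_add_entry(hw, ICE_BLK_FD, 1 /* prof_id */,
				    1 /* entry_id */, vsi_handle,
				    ICE_FLOW_PRIO_NORMAL, match_buf,
				    &entry_h);
	if (status)
		return status;

	/* ... use the filter, then tear it down via its handle ... */
	return ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
}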
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
+{
+ struct ice_flow_entry *entry;
+ struct ice_flow_prof *prof;
+ enum ice_status status = 0;
+
+ if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+ return ICE_ERR_PARAM;
+
+ entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+ /* Retain the pointer to the flow profile as the entry will be freed */
+ prof = entry->prof;
+
+ if (prof) {
+ mutex_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ mutex_unlock(&prof->entries_lock);
+ }
+
+ return status;
+}
+
+/**
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
@@ -752,7 +1047,7 @@ ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
* create the content of a match entry. This function should only be used for
* fixed-size data structures.
*/
-static void
+void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
@@ -762,6 +1057,42 @@ ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
}
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from which
+ * the value to match and the mask value are to be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc)
+{
+ if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+ seg->raws[seg->raws_cnt].off = off;
+ seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+ seg->raws[seg->raws_cnt].info.src.val = val_loc;
+ seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+ /* The "last" field is used to store the length of the field */
+ seg->raws[seg->raws_cnt].info.src.last = len;
+ }
+
+ /* Overflows of "raws" will be handled as an error condition later in
+ * the flow when this information is processed.
+ */
+ seg->raws_cnt++;
+}
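
For instance, to match two raw bytes at segment offset 12 (the ethertype position given the 14-byte MAC header assumed by ice_flow_calc_seg_sz()), with the value and mask at invented offsets 0 and 2 of the entry input buffer:

	struct ice_flow_seg_info seg = { 0 };

	ice_flow_add_fld_raw(&seg, 12, 2, 0, 2);

Note that overflowing raws_cnt is deliberately deferred here: ice_flow_xtract_raws() reports ICE_ERR_MAX_LIMIT when the profile is later processed.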
+
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
@@ -945,6 +1276,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
+#define ICE_RSS_INNER_HEADERS 2
/* Flow profile ID format:
* [0:31] - Packet match fields
@@ -1085,6 +1417,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
mutex_unlock(&hw->rss_locks);
return status;
@@ -1238,6 +1573,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
ICE_RSS_OUTER_HEADERS);
if (status)
break;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_INNER_HEADERS);
+ if (status)
+ break;
}
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 5558627bd5eb..3913da2116d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+ ICE_FLOW_SEG_HDR_GRE = 0x00000200,
};
enum ice_flow_field {
@@ -58,6 +59,8 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ /* GRE */
+ ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
@@ -125,6 +128,7 @@ enum ice_flow_priority {
};
#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -161,14 +165,38 @@ struct ice_flow_fld_info {
struct ice_flow_seg_xtrct xtrct;
};
+struct ice_flow_seg_fld_raw {
+ struct ice_flow_fld_info info;
+ u16 off; /* Offset from the start of the segment */
+};
+
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+ u8 raws_cnt; /* Number of raw fields to be matched */
+ struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+ struct list_head l_entry;
+
+ u64 id;
+ struct ice_flow_prof *prof;
+ /* Flow entry's content */
+ void *entry;
+ enum ice_flow_priority priority;
+ u16 vsi_handle;
+ u16 entry_sz;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+
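The handle is nothing more than the entry pointer widened to u64, so the two macros invert each other. A standalone demonstration (uintptr_t is used here for portability, where the driver casts directly):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int obj;
	uint64_t h = (uint64_t)(uintptr_t)&obj;	/* ICE_FLOW_ENTRY_HNDL */

	assert((int *)(uintptr_t)h == &obj);	/* ICE_FLOW_ENTRY_PTR */
	return 0;
}
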
struct ice_flow_prof {
struct list_head l_entry;
@@ -194,7 +222,24 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, u64 entry_h);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+ void *data, u64 *entry_h);
+enum ice_status
+ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
new file mode 100644
index 000000000000..2418d4fff037
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_fltr.h"
+
+/**
+ * ice_fltr_free_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_fltr_add_mac_to_list
+ */
+void ice_fltr_free_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_fltr_add_entry_to_list - allocate and add filter entry to list
+ * @dev: pointer to device needed by alloc function
+ * @info: filter info struct that gets added to the passed in list
+ * @list: pointer to the list which contains MAC filters entry
+ */
+static enum ice_status
+ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
+ struct list_head *list)
+{
+ struct ice_fltr_list_entry *entry;
+
+ entry = devm_kzalloc(dev, sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return ICE_ERR_NO_MEMORY;
+
+ entry->fltr_info = *info;
+
+ INIT_LIST_HEAD(&entry->list_entry);
+ list_add(&entry->list_entry, list);
+
+ return 0;
+}
+
+/**
+ * ice_fltr_add_mac_list - add list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_mac_list - remove list of MAC filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_vlan_list - add list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_vlan_list - remove list of VLAN filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_vlan(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_add_eth_list - add list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_add_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_eth_list - remove list of ethertype filters
+ * @vsi: pointer to VSI struct
+ * @list: list of filters
+ */
+static enum ice_status
+ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+{
+ return ice_remove_eth_mac(&vsi->back->hw, list);
+}
+
+/**
+ * ice_fltr_remove_all - remove all filters associated with VSI
+ * @vsi: pointer to VSI struct
+ */
+void ice_fltr_remove_all(struct ice_vsi *vsi)
+{
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+}
+
+/**
+ * ice_fltr_add_mac_to_list - add MAC filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @mac: MAC address to add
+ * @action: filter action
+ */
+enum ice_status
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_MAC;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+
+ ether_addr_copy(info.l_data.mac.mac_addr, mac);
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @vlan_id: VLAN ID to add
+ * @action: filter action
+ */
+static enum ice_status
+ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 vlan_id, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = ICE_FLTR_TX;
+ info.src_id = ICE_SRC_ID_VSI;
+ info.lkup_type = ICE_SW_LKUP_VLAN;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.vlan.vlan_id = vlan_id;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_add_eth_to_list - add ethertype filter info to existing list
+ * @vsi: pointer to VSI struct
+ * @list: list to add filter info to
+ * @ethertype: ethertype of packet that matches filter
+ * @flag: filter direction, Tx or Rx
+ * @action: filter action
+ */
+static enum ice_status
+ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
+ u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_info info = { 0 };
+
+ info.flag = flag;
+ info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ info.fltr_act = action;
+ info.vsi_handle = vsi->idx;
+ info.l_data.ethertype_mac.ethertype = ethertype;
+
+ if (flag == ICE_FLTR_TX)
+ info.src_id = ICE_SRC_ID_VSI;
+ else
+ info.src_id = ICE_SRC_ID_LPORT;
+
+ return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
+ list);
+}
+
+/**
+ * ice_fltr_prepare_mac - add or remove MAC rule
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*mac_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_mac_and_broadcast - add or remove MAC and broadcast filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC address to add
+ * @action: action to be performed on filter match
+ * @mac_action: pointer to add or remove MAC function
+ */
+static enum ice_status
+ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status(*mac_action)
+ (struct ice_vsi *, struct list_head *))
+{
+ u8 broadcast[ETH_ALEN];
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ eth_broadcast_addr(broadcast);
+ if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
+ ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ result = mac_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_vlan - add or remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ * @vlan_action: pointer to add or remove VLAN function
+ */
+static enum ice_status
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*vlan_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = vlan_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_prepare_eth - add or remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of packet to be filtered
+ * @flag: direction of packet, Tx or Rx
+ * @action: action to be performed on filter match
+ * @eth_action: pointer to add or remove ethertype function
+ */
+static enum ice_status
+ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action,
+ enum ice_status (*eth_action)(struct ice_vsi *,
+ struct list_head *))
+{
+ enum ice_status result;
+ LIST_HEAD(tmp_list);
+
+ if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
+ return ICE_ERR_NO_MEMORY;
+
+ result = eth_action(vsi, &tmp_list);
+ ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
+ return result;
+}
+
+/**
+ * ice_fltr_add_mac - add single MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
+}
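
A hedged kernel-context sketch of the single-address helpers end to end; vsi and dev_addr are placeholders, and ICE_FWD_TO_VSI is assumed to be an ice_sw_fwd_act_type action:

	enum ice_status status;

	status = ice_fltr_add_mac(vsi, dev_addr, ICE_FWD_TO_VSI);
	if (!status)
		status = ice_fltr_remove_mac(vsi, dev_addr, ICE_FWD_TO_VSI);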
+
+/**
+ * ice_fltr_add_mac_and_broadcast - add single MAC and broadcast filters
+ * @vsi: pointer to VSI struct
+ * @mac: MAC to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac_and_broadcast(vsi, mac, action,
+ ice_fltr_add_mac_list);
+}
+
+/**
+ * ice_fltr_remove_mac - remove MAC filter
+ * @vsi: pointer to VSI struct
+ * @mac: filter MAC to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
+}
+
+/**
+ * ice_fltr_add_vlan - add single VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: VLAN ID to add
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_add_vlan_list);
+}
+
+/**
+ * ice_fltr_remove_vlan - remove VLAN filter
+ * @vsi: pointer to VSI struct
+ * @vlan_id: filter VLAN to remove
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_vlan(vsi, vlan_id, action,
+ ice_fltr_remove_vlan_list);
+}
+
+/**
+ * ice_fltr_add_eth - add specific ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of packet to be filtered, Tx or Rx
+ * @action: action to be performed on filter match
+ */
+enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_add_eth_list);
+}
+
+/**
+ * ice_fltr_remove_eth - remove ethertype filter
+ * @vsi: pointer to VSI struct
+ * @ethertype: ethertype of filter
+ * @flag: direction of filter
+ * @action: action to remove
+ */
+enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
+ u16 flag, enum ice_sw_fwd_act_type action)
+{
+ return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
+ ice_fltr_remove_eth_list);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
new file mode 100644
index 000000000000..361cb4da9b43
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2020, Intel Corporation. */
+
+#ifndef _ICE_FLTR_H_
+#define _ICE_FLTR_H_
+
+void ice_fltr_free_list(struct device *dev, struct list_head *h);
+enum ice_status
+ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
+ const u8 *mac, enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+enum ice_status
+ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+
+enum ice_status
+ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
+ enum ice_sw_fwd_act_type action);
+
+enum ice_status
+ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+enum ice_status
+ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action);
+void ice_fltr_remove_all(struct ice_vsi *vsi);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1d37a9f02c1c..1086c9f778b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,9 +6,6 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
-#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096))
-#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096))
-#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096))
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -42,6 +39,7 @@
#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ARQLEN 0x0022E480
#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30)
#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
#define PF_MBX_ARQT 0x0022E580
#define PF_MBX_ATQBAH 0x0022E180
@@ -50,6 +48,7 @@
#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN 0x0022E200
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENC 0x00083000
@@ -58,6 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
+#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -218,6 +218,11 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32))
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
@@ -289,6 +294,20 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLQF_FD_CNT 0x00460018
+#define GLQF_FD_CNT_FD_BCNT_S 16
+#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16)
+#define GLQF_FD_SIZE 0x00460010
+#define GLQF_FD_SIZE_FD_GSIZE_S 0
+#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_BSIZE_S 16
+#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
+#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
+#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
+#define PFQF_FD_ENA 0x0043A000
+#define PFQF_FD_ENA_FD_ENA_M BIT(0)
+#define PFQF_FD_SIZE 0x00460100
#define GLDCB_RTCTQ_RXQNUM_S 0
#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
@@ -332,6 +351,7 @@
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
@@ -342,6 +362,9 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
+#define VSIQF_FD_CNT_FD_GCNT_S 0
+#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 878e125d8b42..14dfbbc1b2cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -40,6 +40,104 @@ union ice_32byte_rx_desc {
} wb; /* writeback */
};
+struct ice_fltr_desc {
+ __le64 qidx_compq_space_stat;
+ __le64 dtype_cmd_vsi_fdid;
+};
+
+#define ICE_FXD_FLTR_QW0_QINDEX_S 0
+#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_S 11
+#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
+#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL
+
+#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16
+#define ICE_FXD_FLTR_QW0_STAT_CNT_M \
+ (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29
+#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S)
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_S 32
+#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S)
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_DROP_S 40
+#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S)
+#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL
+#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S)
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S)
+#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \
+ (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S)
+#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_DTYPE_S 0
+#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)
+#define ICE_FXD_FLTR_QW1_PCMD_S 4
+#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S)
+#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL
+#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5
+#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S)
+#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_S 8
+#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S)
+#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_FD_VSI_S 14
+#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S)
+#define ICE_FXD_FLTR_QW1_SWAP_S 24
+#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S)
+#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL
+#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
+#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
+#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
+#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_S 32
+#define ICE_FXD_FLTR_QW1_FDID_M \
+ (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+
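A standalone sketch of assembling QW1 (dtype_cmd_vsi_fdid) from these fields; shift positions and values are copied from the defines above, and the VSI number is invented:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t qw1 = 0;

	qw1 |= 0x8ULL << 0;	/* DTYPE: filter programming descriptor */
	qw1 |= 0x0ULL << 4;	/* PCMD: add (not remove) */
	qw1 |= 5ULL << 14;	/* FD_VSI: target VSI 5 (example) */
	qw1 |= 0x1ULL << 25;	/* FDID_PRI: priority one */
	qw1 |= 0x05ULL << 28;	/* FDID_MDID: report FDID in MDID 5 */

	assert(qw1 == 0x52014008ULL);
	return 0;
}
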
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
@@ -262,6 +360,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
+enum ice_rx_flex_desc_status_error_1_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
@@ -340,6 +444,7 @@ struct ice_tx_desc {
enum ice_tx_desc_dtype_value {
ICE_TX_DESC_DTYPE_DATA = 0x0,
ICE_TX_DESC_DTYPE_CTX = 0x1,
+ ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8,
/* DESC_DONE - HW has completed write-back of descriptor */
ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};
@@ -351,12 +456,14 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ ICE_TX_DESC_CMD_DUMMY = 0x0010,
ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
+ ICE_TX_DESC_CMD_RE = 0x0400,
};
#define ICE_TXD_QW1_OFFSET_S 16
@@ -413,6 +520,25 @@ enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_RESERVED = 0x40
};
+enum ice_tx_ctx_desc_eipt_offload {
+ ICE_TX_CTX_EIPT_NONE = 0x0,
+ ICE_TX_CTX_EIPT_IPV6 = 0x1,
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2,
+ ICE_TX_CTX_EIPT_IPV4 = 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S 2
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S 9
+
+#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_NATLEN_S 12
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S 23
+#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
+
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
@@ -455,7 +581,7 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
/* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2f256bf45efc..28b46cc9f5cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
/**
@@ -18,6 +19,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_CTRL:
+ return "ICE_VSI_CTRL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
default:
@@ -37,7 +40,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
*/
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
- int i, ret = 0;
+ int ret = 0;
+ u16 i;
for (i = 0; i < vsi->num_rxq; i++)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
@@ -121,6 +125,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -185,6 +190,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
*/
vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
+ case ICE_VSI_CTRL:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -248,8 +258,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
+ vsi->vsi_num, ice_stat_str(status));
kfree(ctxt);
}
@@ -320,7 +330,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi)
+ if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -331,6 +341,25 @@ int ice_vsi_clear(struct ice_vsi *vsi)
}
/**
+ * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+#define FDIR_RX_DESC_CLEAN_BUDGET 64
+ ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+
+ return IRQ_HANDLED;
+}
+
+/**
* ice_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
@@ -381,8 +410,6 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
- vsi->idx = pf->next_vsi;
-
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
@@ -396,6 +423,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
+ case ICE_VSI_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup ctrl VSI MSIX irq handler */
+ vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+ break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -409,12 +443,20 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
- /* fill VSI slot in the PF struct */
- pf->vsi[pf->next_vsi] = vsi;
+ if (vsi->type == ICE_VSI_CTRL) {
+ /* Use the last VSI slot as the index for the control VSI */
+ vsi->idx = pf->num_alloc_vsi - 1;
+ pf->ctrl_vsi_idx = vsi->idx;
+ pf->vsi[vsi->idx] = vsi;
+ } else {
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ }
goto unlock_pf;
err_rings:
@@ -426,6 +468,48 @@ unlock_pf:
}
/**
+ * ice_alloc_fd_res - Allocate FD resource for a VSI
+ * @vsi: pointer to the ice_vsi
+ *
+ * This allocates the Flow Director resources
+ *
+ * Returns 0 on success or -EPERM when FD resources cannot be assigned to
+ * this VSI
+ */
+static int ice_alloc_fd_res(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u32 g_val, b_val;
+
+ /* Flow Director filters are only allocated/assigned to the PF VSI, which
+ * passes the traffic. The CTRL VSI is only used to add/delete filters, so
+ * we don't allocate resources to it.
+ */
+
+ /* FD filters from guaranteed pool per VSI */
+ g_val = pf->hw.func_caps.fd_fltr_guar;
+ if (!g_val)
+ return -EPERM;
+
+ /* FD filters from best effort pool */
+ b_val = pf->hw.func_caps.fd_fltr_best_effort;
+ if (!b_val)
+ return -EPERM;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EPERM;
+
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+
+ return 0;
+}
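
Worked numbers for the split (capability values invented for illustration): with 1024 guaranteed filters and 8 allocatable VSIs, each VSI gets 1024 / 8 = 128 guaranteed entries, while the best-effort pool is a shared quota that every VSI sees in full:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t fd_fltr_guar = 1024;		/* guaranteed pool */
	uint32_t fd_fltr_best_effort = 2048;	/* best-effort pool */
	uint16_t num_alloc_vsi = 8;

	assert(fd_fltr_guar / num_alloc_vsi == 128);	/* num_gfltr */
	assert(fd_fltr_best_effort == 2048);		/* num_bfltr */
	return 0;
}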
+
+/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -521,8 +605,8 @@ static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -565,8 +649,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
- vsi->rss_table_size = cap->rss_table_size;
- vsi->rss_size = min_t(int, num_online_cpus(),
+ vsi->rss_table_size = (u16)cap->rss_table_size;
+ vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
@@ -581,8 +665,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
case ICE_VSI_LB:
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
- vsi->type);
+ dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
break;
}
}
@@ -684,15 +768,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_RSS_QS_PER_VF;
- qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(u16, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
- qcount_rx = min_t(int, qcount_rx,
+ qcount_rx = min_t(u16, qcount_rx,
vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount_rx);
+ pow = (u16)order_base_2(qcount_rx);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -752,6 +836,51 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
/**
+ * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 dflt_q_group, dflt_q_prio;
+ u16 dflt_q, report_q, val;
+
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
+ return;
+
+ val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
+ ctxt->info.valid_sections |= cpu_to_le16(val);
+ dflt_q = 0;
+ dflt_q_group = 0;
+ report_q = 0;
+ dflt_q_prio = 0;
+
+ /* enable flow director filtering/programming */
+ val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
+ ctxt->info.fd_options = cpu_to_le16(val);
+ /* max of allocated flow director filters */
+ ctxt->info.max_fd_fltr_dedicated =
+ cpu_to_le16(vsi->num_gfltr);
+ /* max of shared flow director filters any VSI may program */
+ ctxt->info.max_fd_fltr_shared =
+ cpu_to_le16(vsi->num_bfltr);
+ /* default queue index within the VSI of the default FD */
+ val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
+ ICE_AQ_VSI_FD_DEF_Q_M);
+ /* target queue or queue group to the FD filter */
+ val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
+ ICE_AQ_VSI_FD_DEF_GRP_M);
+ ctxt->info.fd_def_q = cpu_to_le16(val);
+ /* queue index on which FD filter completion is reported */
+ val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
+ ICE_AQ_VSI_FD_REPORT_Q_M);
+ /* priority of the default qindex action */
+ val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
+ ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ ctxt->info.fd_report_opt = cpu_to_le16(val);
+}
+
+/**
* ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
* @ctxt: the VSI context being set
* @vsi: the VSI being configured
@@ -776,13 +905,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
- case ICE_VSI_LB:
+ default:
dev_dbg(dev, "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type));
return;
- default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- return;
}
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
@@ -812,8 +938,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (!ctxt)
return -ENOMEM;
- ctxt->info = vsi->info;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_LB:
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -829,12 +955,15 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ice_set_dflt_vsi_ctx(ctxt);
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
/* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
+ vsi->type != ICE_VSI_CTRL) {
ice_set_rss_vsi_ctx(ctxt, vsi);
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
@@ -941,7 +1070,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
- int start = 0, end = 0;
+ u16 start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
@@ -1024,6 +1153,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 num_q_vectors;
+ int base;
dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
@@ -1038,14 +1168,15 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- vsi->idx);
- if (vsi->base_vector < 0) {
+ base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
+
+ if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
+ vsi->base_vector = (u16)base;
pf->num_avail_sw_msix -= num_q_vectors;
return 0;
@@ -1085,7 +1216,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
- int i;
+ u16 i;
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
@@ -1178,7 +1309,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
u8 *lut;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1193,7 +1324,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(dev, "set_rss_lut failed, error %d\n", status);
+ dev_err(dev, "set_rss_lut failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
@@ -1215,7 +1347,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(dev, "set_rss_key failed, error %d\n", status);
+ dev_err(dev, "set_rss_key failed, error %s\n",
+ ice_stat_str(status));
err = -EIO;
}
@@ -1248,8 +1381,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
- vsi->vsi_num, status);
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -1281,91 +1414,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-}
-
-/**
- * ice_add_mac_to_list - Add a MAC address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds MAC address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
-
- tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, add_list);
-
- return 0;
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
+ vsi_num, ice_stat_str(status));
}
/**
@@ -1415,54 +1514,21 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
- struct ice_fltr_list_entry *e, *tmp;
-
- list_for_each_entry_safe(e, tmp, h, list_entry) {
- list_del(&e->list_entry);
- devm_kfree(dev, e);
- }
-}
-
-/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
* @vid: VLAN ID to be added
+ * @action: filter action to be performed on match
*/
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
- struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
- tmp->fltr_info.vsi_handle = vsi->idx;
- tmp->fltr_info.l_data.vlan.vlan_id = vid;
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, &tmp_add_list);
-
- status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (!status) {
+ if (!ice_fltr_add_vlan(vsi, vid, action)) {
vsi->num_vlan++;
} else {
err = -ENODEV;
@@ -1470,7 +1536,6 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
vsi->vsi_num);
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1483,41 +1548,25 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
- struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.l_data.vlan.vlan_id = vid;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
- vid, vsi->vsi_num, status);
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, status);
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
+ vid, vsi->vsi_num, ice_stat_str(status));
err = -EIO;
}
- ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
@@ -1547,6 +1596,32 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
}
/**
+ * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
+ * @hw: HW pointer
+ * @pf_q: index of the Rx queue in the PF's queue space
+ * @rxdid: flexible descriptor RXDID
+ * @prio: priority for the RXDID for this queue
+ */
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+{
+ u32 regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+
+ /* clear any previous values */
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
+
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+}
+
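A hedged usage sketch for the helper above (the RXDID and priority values are illustrative only):
/* select the flexible NIC descriptor profile for Rx queue pf_q at
 * RXDID priority 0x3
 */
ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_FLEX_NIC, 0x3);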
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1671,7 +1746,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- u32 txq = 0, rxq = 0;
+ u16 txq = 0, rxq = 0;
int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) {
@@ -1737,8 +1812,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1761,6 +1837,12 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
enum ice_status status;
int ret = 0;
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.pvid)
+ return 0;
+
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -1783,8 +1865,9 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
- ena, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
+ ena, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -1922,9 +2005,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- pf->hw.adminq.sq_last_status);
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
+ ice_stat_str(status),
+ ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -1991,47 +2075,6 @@ clear_reg_idx:
}
/**
- * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
- * @vsi: the VSI being configured
- * @add_rule: boolean value to add or remove ethertype filter rule
- */
-static void
-ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
-{
- struct ice_fltr_list_entry *list;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (add_rule)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
- else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
-
- if (status)
- dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
-}
-
-/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
@@ -2039,45 +2082,25 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- struct ice_fltr_list_entry *list;
+ enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
dev = ice_pf_to_dev(pf);
- list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
+ eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
- list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
- list->fltr_info.vsi_handle = vsi->idx;
- list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
-
- if (tx) {
- list->fltr_info.fltr_act = ICE_DROP_PACKET;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src_id = ICE_SRC_ID_VSI;
- } else {
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.flag = ICE_FLTR_RX;
- list->fltr_info.src_id = ICE_SRC_ID_LPORT;
- }
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (create)
- status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ if (tx)
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
else
- status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
if (status)
- dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
+ dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
-
- ice_free_fltr_list(dev, &tmp_add_list);
+ vsi->vsi_num, ice_stat_str(status));
}
/**
@@ -2122,10 +2145,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
+ ice_alloc_fd_res(vsi);
+
if (ice_vsi_get_qs(vsi)) {
dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
vsi->idx);
- goto unroll_get_qs;
+ goto unroll_vsi_alloc;
}
/* set RSS capabilities */
@@ -2140,6 +2165,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_get_qs;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2164,20 +2190,23 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* so this handles those cases (i.e. adding the PF to a bridge
* without the 8021q module loaded).
*/
- ret = ice_vsi_add_vlan(vsi, 0);
+ ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
if (ret)
goto unroll_clear_rings;
ice_vsi_map_rings_to_vectors(vsi);
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ ice_init_arfs(vsi);
break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
@@ -2223,8 +2252,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
goto unroll_vector_base;
}
@@ -2239,9 +2268,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
*/
if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, true);
-
- /* Tx LLDP packets */
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, true);
}
@@ -2259,6 +2287,7 @@ unroll_vsi_init:
ice_vsi_delete(vsi);
unroll_get_qs:
ice_vsi_put_qs(vsi);
+unroll_vsi_alloc:
ice_vsi_clear(vsi);
return NULL;
@@ -2411,6 +2440,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
if (!locked)
rtnl_unlock();
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ err = ice_vsi_open_ctrl(vsi);
}
return err;
@@ -2440,6 +2471,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
+ } else if (vsi->type == ICE_VSI_CTRL) {
+ ice_vsi_close(vsi);
}
}
@@ -2558,7 +2591,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, false);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
@@ -2568,7 +2602,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2673,15 +2707,13 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
&coalesce[i]);
- for (; i < vsi->num_q_vectors; i++) {
- struct ice_coalesce_stored coalesce_dflt = {
- .itr_tx = ICE_DFLT_TX_ITR,
- .itr_rx = ICE_DFLT_RX_ITR,
- .intrl = 0
- };
+ /* number of q_vectors increased, so assume coalesce settings were
+ * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+ * the previous settings from q_vector 0 for all of the new q_vectors
+ */
+ for (; i < vsi->num_q_vectors; i++)
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
- &coalesce_dflt);
- }
+ &coalesce[0]);
}
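/* Illustration (not part of the patch): if a rebuild grows a VSI from 4
 * to 8 q_vectors, vectors 0-3 keep their stored ITR/INTRL values and
 * vectors 4-7 inherit coalesce[0] instead of driver defaults, which
 * preserves a global `ethtool -C` setting across the rebuild.
 */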
/**
@@ -2746,6 +2778,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
ice_vsi_get_qs(vsi);
+
+ ice_alloc_fd_res(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2754,6 +2788,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vsi;
switch (vsi->type) {
+ case ICE_VSI_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2773,17 +2808,19 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
}
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
+ /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
+ if (vsi->type != ICE_VSI_CTRL)
+ /* Do not exit if configuring RSS had an issue, at
+ * least receive traffic on first queue. Hence no
+ * need to capture return value
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -2814,8 +2851,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -2924,8 +2961,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
- dev_err(dev, "VSI %d failed TC config, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "VSI %d failed TC config, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2985,33 +3022,27 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
- * @vsi: the VSI being configured MAC filter
- * @macaddr: the MAC address to be added.
- * @set: Add or delete a MAC filter
- *
- * Adds or removes MAC address filter entry for VF VSI
+ * ice_status_to_errno - convert from enum ice_status to Linux errno
+ * @err: ice_status value to convert
*/
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+int ice_status_to_errno(enum ice_status err)
{
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
-
- /* Update MAC filter list to be added or removed for a VSI */
- if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
- status = ICE_ERR_NO_MEMORY;
- goto cfg_mac_fltr_exit;
+ switch (err) {
+ case ICE_SUCCESS:
+ return 0;
+ case ICE_ERR_DOES_NOT_EXIST:
+ return -ENOENT;
+ case ICE_ERR_OUT_OF_RANGE:
+ return -ENOTTY;
+ case ICE_ERR_PARAM:
+ return -EINVAL;
+ case ICE_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case ICE_ERR_MAX_LIMIT:
+ return -EAGAIN;
+ default:
+ return -EINVAL;
}
-
- if (set)
- status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
- else
- status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
-
-cfg_mac_fltr_exit:
- ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
- return status;
}
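A hedged sketch of how a caller can use the new converter instead of hard-coding an errno (mirrors the filter calls elsewhere in this patch):
/* propagate the ice_status from a filter call as a Linux errno */
status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (status)
	return ice_status_to_errno(status);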
/**
@@ -3079,8 +3110,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
- vsi->vsi_num, status);
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
+ vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
@@ -3118,8 +3149,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
+ dflt_vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 04ca00799364..d80e6afa4511 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -8,12 +8,6 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr);
-
-void ice_free_fltr_list(struct device *dev, struct list_head *h);
-
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -79,6 +74,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
+void
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+
void ice_vsi_put_qs(struct ice_vsi *vsi);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
@@ -97,6 +95,8 @@ void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+int ice_status_to_errno(enum ice_status err);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5b190c257124..082825e3cb39 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -8,6 +8,7 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
@@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
static int ice_init_mac_fltr(struct ice_pf *pf)
{
enum ice_status status;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
+ u8 *perm_addr;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return -EINVAL;
- /* To add a MAC filter, first add the MAC to a list and then
- * pass the list to ice_add_mac.
- */
-
- /* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
- if (status)
- goto unregister;
-
- /* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list as well.
- */
- eth_broadcast_addr(broadcast);
- status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
- if (status)
- goto unregister;
+ perm_addr = vsi->port_info->mac.perm_addr;
+ status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
+ if (!status)
+ return 0;
- return 0;
-unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
*/
- if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
- status);
+ if (vsi->netdev->reg_state == NETREG_REGISTERED) {
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
+ ice_stat_str(status));
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
@@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+ if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
+ ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
@@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
- ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+ status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
@@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Add MAC addresses in the sync list */
- status = ice_add_mac(hw, &vsi->tmp_sync_list);
- ice_free_fltr_list(dev, &vsi->tmp_sync_list);
+ status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter was added successfully or already exists, do not go into
 * the 'if' condition below and report it as an error. Instead continue
 * processing the rest of the function.
@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
- } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ } else {
+ /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -462,7 +452,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %d\n", qtype,
- ret);
+ dev_err(dev, "%s Receive Queue event error %s\n", qtype,
+ ice_stat_str(ret));
break;
}
@@ -1123,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
*
* If not already scheduled, this puts the task into the work queue.
*/
-static void ice_service_task_schedule(struct ice_pf *pf)
+void ice_service_task_schedule(struct ice_pf *pf)
{
if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
!test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
@@ -1198,8 +1188,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ unsigned int i;
u32 reg;
- int i;
if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
/* Since the VF MDD event logging is rate limited, check if
@@ -1332,8 +1322,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* PF can be configured to reset the VF through ethtool
* private flag mdd-auto-reset-vf.
*/
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ ice_print_vf_rx_mdd_event(vf);
ice_reset_vf(&pf->vf[i], false);
+ }
}
}
@@ -1493,7 +1488,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
-
+ ice_sync_arfs_fltrs(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1652,9 +1647,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
- q_vector->affinity_notify.notify = ice_irq_affinity_notify;
- q_vector->affinity_notify.release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+ struct irq_affinity_notify *affinity_notify;
+
+ affinity_notify = &q_vector->affinity_notify;
+ affinity_notify->notify = ice_irq_affinity_notify;
+ affinity_notify->release = ice_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, affinity_notify);
+ }
/* assign the mask for this irq */
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
@@ -1666,8 +1666,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector) {
vector--;
- irq_num = pf->msix_entries[base + vector].vector,
- irq_set_affinity_notifier(irq_num, NULL);
+ irq_num = pf->msix_entries[base + vector].vector;
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
@@ -1809,8 +1810,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
- status);
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
+ ice_stat_str(status));
goto clear_xdp_rings;
}
ice_vsi_assign_bpf_prog(vsi, prog);
@@ -1898,6 +1899,9 @@ free_qmap:
for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = vsi->num_txq;
+ /* change number of XDP Tx queues to 0 */
+ vsi->num_xdp_txq = 0;
+
return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
}
@@ -1931,7 +1935,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_txq;
+ vsi->num_xdp_txq = vsi->alloc_rxq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
@@ -2137,10 +2141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
- if (!test_bit(__ICE_DOWN, pf->state)) {
- ice_service_task_schedule(pf);
- ice_irq_dynamic_ena(hw, NULL, NULL);
- }
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
return ret;
}
@@ -2247,7 +2249,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
- pf->oicr_idx = oicr_idx;
+ pf->oicr_idx = (u16)oicr_idx;
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
@@ -2331,6 +2333,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH;
csumo_features = NETIF_F_RXCSUM |
@@ -2342,13 +2345,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- tso_features = NETIF_F_TSO |
+ tso_features = NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
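/* Assumed semantics (not part of the patch): features listed in
 * gso_partial_features are only offloaded via NETIF_F_GSO_PARTIAL,
 * i.e. the stack finalizes what the NIC cannot (such as outer tunnel
 * checksums) while the NIC still performs the segmentation.
 */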
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
+ /* add support for HW_CSUM on packets with MPLS header */
+ netdev->mpls_features = NETIF_F_HW_CSUM;
+
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
@@ -2411,7 +2428,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- goto err_destroy_devlink_port;
+ goto err_free_netdev;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
@@ -2422,9 +2439,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
return 0;
+err_free_netdev:
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
err_destroy_devlink_port:
ice_devlink_destroy_port(pf);
-
return err;
}
@@ -2457,6 +2476,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
+ * ice_ctrl_vsi_setup - Set up a control VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ *
+ * Returns pointer to the successfully allocated VSI software struct
+ * on success, otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+}
+
+/**
* ice_lb_vsi_setup - Set up a loopback VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2509,7 +2542,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
@@ -2594,12 +2627,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
+ status = ice_set_cpu_rx_rmap(vsi);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+ vsi->vsi_num, status);
+ status = -EINVAL;
+ goto unroll_napi_add;
+ }
status = ice_init_mac_fltr(pf);
if (status)
- goto unroll_napi_add;
+ goto free_cpu_rx_map;
return status;
+free_cpu_rx_map:
+ ice_free_cpu_rx_rmap(vsi);
+
unroll_napi_add:
if (vsi) {
ice_napi_del(vsi);
@@ -2630,7 +2673,8 @@ unroll_vsi_setup:
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
- u16 count = 0, bit;
+ unsigned long bit;
+ u16 count = 0;
mutex_lock(lock);
for_each_clear_bit(bit, pf_qmap, size)
@@ -2703,6 +2747,23 @@ static void ice_set_pf_caps(struct ice_pf *pf)
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+ clear_bit(ICE_FLAG_FD_ENA, pf->flags);
+ if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
+ u16 unused;
+
+ /* ctrl_vsi_idx will be set to a valid value when flow director
+ * is set up by ice_init_fdir
+ */
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ set_bit(ICE_FLAG_FD_ENA, pf->flags);
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(&pf->hw, &unused,
+ func_caps->fd_fltr_guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(&pf->hw, &unused,
+ func_caps->fd_fltr_best_effort);
+ }
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
@@ -2769,6 +2830,15 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_budget += needed;
v_left -= needed;
+ /* reserve one vector for flow director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ needed = ICE_FDIR_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2793,8 +2863,10 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
-/* 2 vectors for LAN (traffic + OICR) */
+/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
#define ICE_MIN_LAN_VECS 2
+#define ICE_MIN_RDMA_VECS 2
+#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
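/* Note (not part of the patch): ICE_MIN_VECS evaluates to 2 + 2 + 1 = 5,
 * the LAN and RDMA pairs plus the dedicated flow director vector.
 */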
if (v_actual < ICE_MIN_LAN_VECS) {
/* error if we can't get minimum vectors */
@@ -2869,8 +2941,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/* populate SW interrupts pool with number of OS granted IRQs. */
- pf->num_avail_sw_msix = vectors;
- pf->irq_tracker->num_entries = vectors;
+ pf->num_avail_sw_msix = (u16)vectors;
+ pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
@@ -2902,9 +2974,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
}
if (new_tx)
- vsi->req_txq = new_tx;
+ vsi->req_txq = (u16)new_tx;
if (new_rx)
- vsi->req_rxq = new_rx;
+ vsi->req_rxq = (u16)new_rx;
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
@@ -2985,6 +3057,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
+ break;
case ICE_ERR_BUF_TOO_SHORT:
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
@@ -3013,6 +3088,9 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
return;
default:
break;
@@ -3100,6 +3178,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
}
/**
+ * ice_init_fdir - Initialize flow director VSI and configuration
+ * @pf: pointer to the PF instance
+ *
+ * returns 0 on success, negative on error
+ */
+static int ice_init_fdir(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *ctrl_vsi;
+ int err;
+
+ /* Side Band Flow Director needs to have a control VSI.
+ * Allocate it and store it in the PF.
+ */
+ ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
+ if (!ctrl_vsi) {
+ dev_dbg(dev, "could not create control VSI\n");
+ return -ENOMEM;
+ }
+
+ err = ice_vsi_open_ctrl(ctrl_vsi);
+ if (err) {
+ dev_dbg(dev, "could not open control VSI\n");
+ goto err_vsi_open;
+ }
+
+ mutex_init(&pf->hw.fdir_fltr_lock);
+
+ err = ice_fdir_create_dflt_rules(pf);
+ if (err)
+ goto err_fdir_rule;
+
+ return 0;
+
+err_fdir_rule:
+ ice_fdir_release_flows(&pf->hw);
+ ice_vsi_close(ctrl_vsi);
+err_vsi_open:
+ ice_vsi_release(ctrl_vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+ return err;
+}
+
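/* Note (not part of the patch): the unwind above mirrors setup in
 * reverse - the default rules are dropped before the control VSI is
 * closed and released, and ctrl_vsi_idx is cleared last so a failed
 * init leaves no dangling reference in the PF.
 */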
+/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
*/
@@ -3123,7 +3248,7 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
if (!opt_fw_filename)
return NULL;
- snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
@@ -3295,12 +3420,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
-
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3356,12 +3478,16 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
- /* If no DDP driven features have to be setup, return here */
+ /* If no DDP-driven features have to be set up, we are done with probe */
if (ice_is_safe_mode(pf))
- return 0;
+ goto probe_done;
/* initialize DDP driven features */
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
/* Note: DCB init failure is non-fatal to load */
if (ice_init_pf_dcb(pf, false)) {
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3373,6 +3499,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
+probe_done:
+ /* ready to go, so clear down state bit */
+ clear_bit(__ICE_DOWN, pf->state);
return 0;
err_alloc_sw_unroll:
@@ -3384,6 +3513,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3421,6 +3551,9 @@ static void ice_remove(struct pci_dev *pdev)
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
+ mutex_destroy(&pf->hw.fdir_fltr_lock);
+ if (!ice_is_safe_mode(pf))
+ ice_remove_arfs(pf);
ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
@@ -3705,25 +3838,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the MAC address we also have to change the MAC address
- * based filter rules that were created previously for the old MAC
- * address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac via
- * ice_vsi_cfg_mac_fltr function call for both add and/or remove
- * filters.
- */
- status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
- if (status) {
+ /* Clean up old MAC filter. Not an error if old filter doesn't exist */
+ status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
- if (status) {
- err = -EADDRNOTAVAIL;
- goto err_update_filters;
+ /* Add filter for new MAC. If filter exists, just return success */
+ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
+ return 0;
}
+ /* error if the new filter addition failed */
+ if (status)
+ err = -EADDRNOTAVAIL;
+
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
@@ -3740,8 +3872,8 @@ err_update_filters:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
- mac, status);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
+ mac, ice_stat_str(status));
}
return 0;
}
@@ -3805,8 +3937,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
- status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
+ ice_stat_str(status));
return -EIO;
}
@@ -3938,6 +4070,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
ret = ice_cfg_vlan_pruning(vsi, false, false);
+ if ((features & NETIF_F_NTUPLE) &&
+ !(netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, true);
+ ice_init_arfs(vsi);
+ } else if (!(features & NETIF_F_NTUPLE) &&
+ (netdev->features & NETIF_F_NTUPLE)) {
+ ice_vsi_manage_fdir(vsi, false);
+ ice_clear_arfs(vsi);
+ }
+
return ret;
}
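/* Context (not part of the patch): `ethtool -K <ifname> ntuple on|off`
 * toggles NETIF_F_NTUPLE; that request reaches ice_set_features() via
 * ndo_set_features and flips Flow Director plus aRFS state for the VSI
 * as shown above.
 */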
@@ -4084,6 +4226,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
}
/**
+ * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
+ * @vsi: the VSI to be updated
+ * @rings: rings to work on
+ * @count: number of rings
+ */
+static void
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ u16 count)
+{
+ struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_ring *ring;
+ u64 pkts, bytes;
+
+ ring = READ_ONCE(rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->tx_packets += pkts;
+ vsi_stats->tx_bytes += bytes;
+ vsi->tx_restart += ring->tx_stats.restart_q;
+ vsi->tx_busy += ring->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ }
+}
+
+/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
@@ -4110,15 +4279,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_for_each_txq(vsi, i) {
- ring = READ_ONCE(vsi->tx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
- vsi_stats->tx_packets += pkts;
- vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
- }
+ ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -4130,6 +4291,11 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
}
+ /* update XDP Tx rings counters */
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ vsi->num_xdp_txq);
+
rcu_read_unlock();
}
@@ -4162,7 +4328,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
- pf->stats.illegal_bytes;
+ pf->stats.illegal_bytes +
+ pf->stats.rx_len_errors +
+ pf->stats.rx_undersize +
+ pf->hw_csum_rx_error +
+ pf->stats.rx_jabber +
+ pf->stats.rx_fragments +
+ pf->stats.rx_oversize;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
@@ -4177,6 +4349,7 @@ void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
+ u16 fd_ctr_base;
u8 port;
port = hw->port_info->lport;
@@ -4265,6 +4438,12 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
+ fd_ctr_base = hw->fd_ctr_base;
+
+ ice_stat_update40(hw,
+ GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
+ pf->stat_prev_loaded, &prev_ps->fd_sb_match,
+ &cur_ps->fd_sb_match);
ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
@@ -4308,6 +4487,8 @@ void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
+ cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
+
pf->stat_prev_loaded = true;
}
@@ -4493,6 +4674,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_open_ctrl - open control VSI for use
+ * @vsi: the VSI to open
+ *
+ * Initialization of the Control VSI
+ *
+ * Returns 0 on success, negative value on error
+ */
+int ice_vsi_open_ctrl(struct ice_vsi *vsi)
+{
+ char int_name[ICE_INT_NAME_STR_LEN];
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* allocate descriptors */
+ err = ice_vsi_setup_tx_rings(vsi);
+ if (err)
+ goto err_setup_tx;
+
+ err = ice_vsi_setup_rx_rings(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = ice_vsi_cfg(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
+ dev_driver_string(dev), dev_name(dev));
+ err = ice_vsi_req_irq_msix(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ ice_vsi_cfg_msix(vsi);
+
+ err = ice_vsi_start_all_rx_rings(vsi);
+ if (err)
+ goto err_up_complete;
+
+ clear_bit(__ICE_DOWN, vsi->state);
+ ice_vsi_ena_irq(vsi);
+
+ return 0;
+
+err_up_complete:
+ ice_down(vsi);
+err_setup_rx:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_tx:
+ ice_vsi_free_tx_rings(vsi);
+
+ return err;
+}
+
+/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -4604,8 +4841,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
- status, vsi->idx, ice_vsi_type_str(type));
+ dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
+ ice_stat_str(status), vsi->idx,
+ ice_vsi_type_str(type));
return -EIO;
}
@@ -4659,6 +4897,11 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf)
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
* @reset_type: type of reset
+ *
+ * Do not rebuild VF VSI in this flow because that is already handled via
+ * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
+ * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
+ * to reset/rebuild all the VF VSIs twice.
*/
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -4674,7 +4917,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_init_all_ctrlq(hw);
if (ret) {
- dev_err(dev, "control queues init failed %d\n", ret);
+ dev_err(dev, "control queues init failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4690,7 +4934,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_clear_pf_cfg(hw);
if (ret) {
- dev_err(dev, "clear PF configuration failed %d\n", ret);
+ dev_err(dev, "clear PF configuration failed %s\n",
+ ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4704,7 +4949,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_get_caps(hw);
if (ret) {
- dev_err(dev, "ice_get_caps failed %d\n", ret);
+ dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
+ ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (ret) {
+ dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
goto err_init_ctrlq;
}
@@ -4723,6 +4974,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_sched_init_port;
}
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
+ if (!rd32(hw, PFQF_FD_SIZE)) {
+ u16 unused, guar, b_effort;
+
+ guar = hw->func_caps.fd_fltr_guar;
+ b_effort = hw->func_caps.fd_fltr_best_effort;
+
+ /* force guaranteed filter pool for PF */
+ ice_alloc_fd_guar_item(hw, &unused, guar);
+ /* force shared filter pool for PF */
+ ice_alloc_fd_shrd_item(hw, &unused, b_effort);
+ }
+ }
+
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -4733,12 +4999,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
+ /* If Flow Director is active */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
if (err) {
- dev_err(dev, "VF VSI rebuild failed: %d\n", err);
+ dev_err(dev, "control VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
+
+ /* replay HW Flow Director recipes */
+ if (hw->fdir_prof)
+ ice_fdir_replay_flows(hw);
+
+ /* replay Flow Director filters */
+ ice_fdir_replay_fltrs(pf);
+
+ ice_rebuild_arfs(pf);
}
ice_update_pf_netdev_link(pf);
@@ -4746,8 +5022,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
- ret);
+ dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
+ ice_stat_str(ret));
goto err_vsi_rebuild;
}
@@ -4795,7 +5071,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_pf *pf = vsi->back;
u8 count = 0;
- if (new_mtu == netdev->mtu) {
+ if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
return 0;
}
@@ -4810,11 +5086,11 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < netdev->min_mtu) {
+ if (new_mtu < (int)netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
return -EINVAL;
- } else if (new_mtu > netdev->max_mtu) {
+ } else if (new_mtu > (int)netdev->max_mtu) {
netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
netdev->max_mtu);
return -EINVAL;
@@ -4835,7 +5111,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = new_mtu;
+ netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
@@ -4859,6 +5135,118 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
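The (int) casts added to ice_change_mtu() above matter because new_mtu is signed while the netdev MTU fields are unsigned: under C's usual arithmetic conversions a negative request would be converted to a huge unsigned value and sail past the lower-bound check. A minimal standalone sketch of the failure mode (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int min_mtu = 68;	/* netdev->min_mtu is unsigned */
	int new_mtu = -1;		/* hypothetical bogus request */

	/* without a cast, new_mtu is converted to unsigned int, so -1
	 * becomes UINT_MAX and the lower-bound check wrongly passes
	 */
	if (new_mtu < min_mtu)
		printf("unsigned compare: rejected\n");
	else
		printf("unsigned compare: accepted (wrong)\n");

	/* casting the unsigned side keeps the comparison signed, which
	 * is what the (int) casts in the hunk above achieve
	 */
	if (new_mtu < (int)min_mtu)
		printf("signed compare: rejected (correct)\n");
	return 0;
}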
/**
+ * ice_aq_str - convert AQ err code to a string
+ * @aq_err: the AQ error code to convert
+ */
+const char *ice_aq_str(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_OK:
+ return "OK";
+ case ICE_AQ_RC_EPERM:
+ return "ICE_AQ_RC_EPERM";
+ case ICE_AQ_RC_ENOENT:
+ return "ICE_AQ_RC_ENOENT";
+ case ICE_AQ_RC_ENOMEM:
+ return "ICE_AQ_RC_ENOMEM";
+ case ICE_AQ_RC_EBUSY:
+ return "ICE_AQ_RC_EBUSY";
+ case ICE_AQ_RC_EEXIST:
+ return "ICE_AQ_RC_EEXIST";
+ case ICE_AQ_RC_EINVAL:
+ return "ICE_AQ_RC_EINVAL";
+ case ICE_AQ_RC_ENOSPC:
+ return "ICE_AQ_RC_ENOSPC";
+ case ICE_AQ_RC_ENOSYS:
+ return "ICE_AQ_RC_ENOSYS";
+ case ICE_AQ_RC_EMODE:
+ return "ICE_AQ_RC_EMODE";
+ case ICE_AQ_RC_ENOSEC:
+ return "ICE_AQ_RC_ENOSEC";
+ case ICE_AQ_RC_EBADSIG:
+ return "ICE_AQ_RC_EBADSIG";
+ case ICE_AQ_RC_ESVN:
+ return "ICE_AQ_RC_ESVN";
+ case ICE_AQ_RC_EBADMAN:
+ return "ICE_AQ_RC_EBADMAN";
+ case ICE_AQ_RC_EBADBUF:
+ return "ICE_AQ_RC_EBADBUF";
+ }
+
+ return "ICE_AQ_RC_UNKNOWN";
+}
+
+/**
+ * ice_stat_str - convert status err code to a string
+ * @stat_err: the status error code to convert
+ */
+const char *ice_stat_str(enum ice_status stat_err)
+{
+ switch (stat_err) {
+ case ICE_SUCCESS:
+ return "OK";
+ case ICE_ERR_PARAM:
+ return "ICE_ERR_PARAM";
+ case ICE_ERR_NOT_IMPL:
+ return "ICE_ERR_NOT_IMPL";
+ case ICE_ERR_NOT_READY:
+ return "ICE_ERR_NOT_READY";
+ case ICE_ERR_NOT_SUPPORTED:
+ return "ICE_ERR_NOT_SUPPORTED";
+ case ICE_ERR_BAD_PTR:
+ return "ICE_ERR_BAD_PTR";
+ case ICE_ERR_INVAL_SIZE:
+ return "ICE_ERR_INVAL_SIZE";
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ return "ICE_ERR_DEVICE_NOT_SUPPORTED";
+ case ICE_ERR_RESET_FAILED:
+ return "ICE_ERR_RESET_FAILED";
+ case ICE_ERR_FW_API_VER:
+ return "ICE_ERR_FW_API_VER";
+ case ICE_ERR_NO_MEMORY:
+ return "ICE_ERR_NO_MEMORY";
+ case ICE_ERR_CFG:
+ return "ICE_ERR_CFG";
+ case ICE_ERR_OUT_OF_RANGE:
+ return "ICE_ERR_OUT_OF_RANGE";
+ case ICE_ERR_ALREADY_EXISTS:
+ return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM_CHECKSUM:
+ return "ICE_ERR_NVM_CHECKSUM";
+ case ICE_ERR_BUF_TOO_SHORT:
+ return "ICE_ERR_BUF_TOO_SHORT";
+ case ICE_ERR_NVM_BLANK_MODE:
+ return "ICE_ERR_NVM_BLANK_MODE";
+ case ICE_ERR_IN_USE:
+ return "ICE_ERR_IN_USE";
+ case ICE_ERR_MAX_LIMIT:
+ return "ICE_ERR_MAX_LIMIT";
+ case ICE_ERR_RESET_ONGOING:
+ return "ICE_ERR_RESET_ONGOING";
+ case ICE_ERR_HW_TABLE:
+ return "ICE_ERR_HW_TABLE";
+ case ICE_ERR_DOES_NOT_EXIST:
+ return "ICE_ERR_DOES_NOT_EXIST";
+ case ICE_ERR_FW_DDP_MISMATCH:
+ return "ICE_ERR_FW_DDP_MISMATCH";
+ case ICE_ERR_AQ_ERROR:
+ return "ICE_ERR_AQ_ERROR";
+ case ICE_ERR_AQ_TIMEOUT:
+ return "ICE_ERR_AQ_TIMEOUT";
+ case ICE_ERR_AQ_FULL:
+ return "ICE_ERR_AQ_FULL";
+ case ICE_ERR_AQ_NO_WORK:
+ return "ICE_ERR_AQ_NO_WORK";
+ case ICE_ERR_AQ_EMPTY:
+ return "ICE_ERR_AQ_EMPTY";
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return "ICE_ERR_AQ_FW_CRITICAL";
+ }
+
+ return "ICE_ERR_UNKNOWN";
+}
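Both helpers deliberately omit a default label: with -Wswitch the compiler then flags any enumerator added later without a matching string, while the return placed after the switch still catches values outside the enum. A standalone sketch of the pattern using a stub enum (not the driver's types):

#include <stdio.h>

enum demo_status { DEMO_OK = 0, DEMO_ERR_PARAM = -1 };

static const char *demo_stat_str(enum demo_status err)
{
	switch (err) {
	case DEMO_OK:
		return "OK";
	case DEMO_ERR_PARAM:
		return "DEMO_ERR_PARAM";
	}
	/* only reached for values outside the enum */
	return "DEMO_ERR_UNKNOWN";
}

int main(void)
{
	printf("%s\n", demo_stat_str(DEMO_ERR_PARAM));
	printf("%s\n", demo_stat_str((enum demo_status)-99));
	return 0;
}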
+
+/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
* @seed: RSS hash seed
@@ -4882,8 +5270,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4892,8 +5281,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4924,8 +5314,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
- dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -4934,8 +5325,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
- dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
- status, hw->adminq.rq_last_status);
+ dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
return -EIO;
}
}
@@ -5002,8 +5394,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
- bmode, status, hw->adminq.sq_last_status);
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
+ bmode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -5072,8 +5465,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
- mode, status, hw->adminq.sq_last_status);
+ netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
+ mode, ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return -EIO;
@@ -5100,6 +5494,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_count++;
+ /* Check if PFC is enabled for the TC to which the queue belongs. If
+ * it is, the Tx timeout is not caused by a hung queue and there is no
+ * need to reset and rebuild.
+ */
+ if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
+ dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
+ txqueue);
+ return;
+ }
+
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
@@ -5158,6 +5562,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ tnl_type = TNL_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ tnl_type = TNL_GENEVE;
+ break;
+ default:
+ netdev_err(netdev, "Unknown tunnel type\n");
+ return;
+ }
+
+ status = ice_create_tunnel(&pf->hw, tnl_type, port);
+ if (status == ICE_ERR_OUT_OF_RANGE)
+ netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
+ port);
+ else if (status)
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+}
+
+/**
+ * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
+ * @netdev: This physical port's netdev
+ * @ti: Tunnel endpoint information
+ */
+static void
+ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 port = ntohs(ti->port);
+ enum ice_status status;
+ bool retval;
+
+ retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
+ if (!retval) {
+ netdev_info(netdev, "port %d not found in UDP tunnels list\n",
+ port);
+ return;
+ }
+
+ status = ice_destroy_tunnel(&pf->hw, port, false);
+ if (status)
+ netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
+ port);
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5173,14 +5641,20 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
int err;
- if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
+ if (test_bit(__ICE_DOWN, pf->state)) {
+ netdev_err(netdev, "device is not ready yet\n");
+ return -EBUSY;
+ }
+
netif_carrier_off(netdev);
pi = vsi->port_info;
@@ -5213,6 +5687,10 @@ int ice_open(struct net_device *netdev)
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
+
+ /* Update existing tunnels information */
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -5263,21 +5741,21 @@ ice_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
- if (len & ~(ICE_TXD_MACLEN_MAX))
+ if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len & ~(ICE_TXD_L4LEN_MAX))
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
- if (len & ~(ICE_TXD_IPLEN_MAX))
+ if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
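The replaced checks are not equivalent: `len & ~(MAX)` conflates range and alignment and is only a true range test when MAX + 1 is a power of two, whereas the new form tests the limit and the 2-byte alignment the descriptor length fields require explicitly. A standalone illustration with a hypothetical 508-byte limit (a 7-bit field counted in 4-byte words; the real constants live in the ice headers):

#include <stdio.h>

int main(void)
{
	const unsigned int max = 508;	/* hypothetical field limit, 0x1fc */
	unsigned int len = 506;		/* even and within range */

	/* old style: any bit outside the mask rejects, so the even,
	 * in-range 506 (0x1fa) is refused because bit 1 is set
	 */
	if (len & ~max)
		printf("mask test: rejected\n");

	/* new style: explicit range and 2-byte alignment checks */
	if (len > max || len & 0x1)
		printf("range test: rejected\n");
	else
		printf("range test: accepted\n");
	return 0;
}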
@@ -5322,8 +5800,13 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = ice_rx_flow_steer,
+#endif
.ndo_tx_timeout = ice_tx_timeout,
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_udp_tunnel_add = ice_udp_tunnel_add,
+ .ndo_udp_tunnel_del = ice_udp_tunnel_del,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 8beb675d676b..b049c1c30c88 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -172,7 +172,8 @@ void ice_release_nvm(struct ice_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+static enum ice_status
+ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
@@ -196,7 +197,7 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+static enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
@@ -367,6 +368,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
}
/**
+ * ice_get_netlist_ver_info - get the netlist version information
+ * @hw: pointer to the HW struct
+ *
+ * Get the netlist version information
+ */
+static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+ struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
+ GFP_KERNEL);
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = le16_to_cpu(raw_data);
+ /* exit if the module length is zero */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left-most 4 bytes of the SHA-1 hash */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ kfree(buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
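Each 32-bit version field above is stitched together from a pair of little-endian 16-bit NVM words; a worked standalone check of the assembly (stub words, not real NVM contents):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t high = 0x0001, low = 0x0203;	/* stub NVM words */

	/* mirrors ver->major = (buff[HIGH] << 16) | buff[LOW] */
	uint32_t major = ((uint32_t)high << 16) | low;

	printf("major = 0x%08x\n", (unsigned)major);	/* 0x00010203 */
	return 0;
}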
+
+/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
*
@@ -515,6 +597,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
+ /* read the netlist version information */
+ status = ice_get_netlist_ver_info(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 999f273ba6ad..165eda07b93d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -11,10 +11,6 @@ enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type);
-enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 71647566964e..7f4c1ec1eff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -12,12 +12,15 @@
*/
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_GRE_OF = 64,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eae707ddf8e8..0475134295e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1714,8 +1714,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
-static void
-ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
@@ -1917,7 +1916,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*/
static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
- enum ice_rl_type rl_type, u8 bw_alloc)
+ enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
@@ -1947,8 +1946,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@@ -1967,8 +1965,7 @@ ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@@ -1993,8 +1990,7 @@ ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index a9a8bc3aca42..4028c6365172 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -27,6 +27,8 @@ enum ice_status {
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
+ ICE_ERR_FW_DDP_MISMATCH = -20,
+
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
@@ -35,6 +37,7 @@ enum ice_status {
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
+ ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 51825a203e35..ff7d16ac693e 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- res_type = le16_to_cpu(ele->vsi_port_num) >>
- ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+ res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
+ ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
@@ -1612,18 +1612,17 @@ exit:
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-enum ice_status
-ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct list_head *rule_head;
- u16 elem_sent, total_elem_left;
+ u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
- u16 s_rule_size;
+ u8 elem_sent;
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -1707,8 +1706,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
total_elem_left -= elem_sent) {
struct ice_aqc_sw_rules_elem *entry = r_iter;
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ elem_sent = min_t(u8, total_elem_left,
+ (ICE_AQ_MAX_BUF_LEN / s_rule_size));
status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
elem_sent, ice_aqc_opc_add_sw_rules,
NULL);
@@ -1914,8 +1913,7 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
@@ -2145,8 +2143,7 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* the entries passed into m_list were added previously. It will not attempt to
* do a partial remove of entries that were found.
*/
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
@@ -2678,6 +2675,81 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_alloc_res_cntr - allocate a resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries requested for FD resource type
+ * @counter_id: counter index returned by AQ call
+ */
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Allocate resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto exit;
+
+ *counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
+
+exit:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_free_res_cntr - free a resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries to be freed for FD resource type
+ * @counter_id: counter ID resource which needs to be freed
+ */
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Free resource */
+ buf_len = sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = cpu_to_le16(num_items);
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW,
+ "counter resource could not be freed\n");
+
+ kfree(buf);
+ return status;
+}
+
+/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index fa14b9545dab..8b4f9d35c860 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -208,6 +208,13 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id);
+enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id);
+
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index f67e8362958c..abdb137c8bb7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -15,6 +15,90 @@
#define ICE_RX_HDR_SIZE 256
+#define FDIR_DESC_RXDID 0x40
+#define ICE_FDIR_CLEAN_DELAY 10
+
+/**
+ * ice_prgm_fdir_fltr - Program a Flow Director filter
+ * @vsi: VSI to send dummy packet
+ * @fdir_desc: flow director descriptor
+ * @raw_packet: allocated buffer for flow director
+ */
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet)
+{
+ struct ice_tx_buf *tx_buf, *first;
+ struct ice_fltr_desc *f_desc;
+ struct ice_tx_desc *tx_desc;
+ struct ice_ring *tx_ring;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd;
+ u16 i;
+
+ /* VSI and Tx ring */
+ if (!vsi)
+ return -ENOENT;
+ tx_ring = vsi->tx_rings[0];
+ if (!tx_ring || !tx_ring->desc)
+ return -ENOENT;
+ dev = tx_ring->dev;
+
+ /* we are using two descriptors to add/del a filter and we can wait */
+ for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
+ if (!i)
+ return -EAGAIN;
+ msleep_interruptible(1);
+ }
+
+ dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma))
+ return -EINVAL;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ first = &tx_ring->tx_buf[i];
+ f_desc = ICE_TX_FDIRDESC(tx_ring, i);
+ memcpy(f_desc, fdir_desc, sizeof(*f_desc));
+
+ i++;
+ i = (i < tx_ring->count) ? i : 0;
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_buf[i];
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
+ ICE_TX_DESC_CMD_RE;
+
+ tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->raw_buf = raw_packet;
+
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
+
+ /* Force memory writes to complete before letting h/w know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ /* mark the data descriptor to be watched */
+ first->next_to_watch = tx_desc;
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+
+ return 0;
+}
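Each filter costs two descriptors here (the filter descriptor plus the dummy-packet data descriptor), so the function polls briefly for two free slots and gives up with -EAGAIN if cleanup never catches up. A standalone model of that bounded wait (the delay count and ring state are assumptions, with the sleep replaced by a simulated cleanup step):

#include <stdio.h>

int main(void)
{
	int delay = 10;		/* models ICE_FDIR_CLEAN_DELAY */
	int unused = 0;		/* free descriptors right now */

	for (int i = delay; unused < 2; i--) {
		if (!i) {
			printf("-EAGAIN: ring stayed busy\n");
			return 0;
		}
		/* stand-in for msleep_interruptible(1) while the IRQ
		 * cleanup path frees descriptors
		 */
		unused++;
	}
	printf("two slots free, program the filter\n");
	return 0;
}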
+
/**
* ice_unmap_and_free_tx_buf - Release a Tx buffer
* @ring: the ring that owns the buffer
@@ -24,7 +108,9 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- if (ice_ring_is_xdp(ring))
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ else if (ice_ring_is_xdp(ring))
page_frag_free(tx_buf->raw_buf);
else
dev_kfree_skb_any(tx_buf->skb);
@@ -423,6 +509,22 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
+static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ice_rx_offset(rx_ring) ?
+ SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
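On the large-page branch the truesize is the receive offset plus frame, rounded up to cache-line granularity, plus the aligned skb_shared_info trailer. A worked standalone version (the 64-byte cache line and 320-byte trailer are assumptions standing in for SMP_CACHE_BYTES and sizeof(struct skb_shared_info)):

#include <stdio.h>

#define CACHE_BYTES	64
#define ALIGN_UP(x)	(((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1u))
#define SHINFO_SZ	320

int main(void)
{
	unsigned int offset = 192, size = 1500;

	/* mirrors the PAGE_SIZE >= 8192 branch with a non-zero offset */
	unsigned int truesize = ALIGN_UP(offset + size) + ALIGN_UP(SHINFO_SZ);

	printf("truesize = %u\n", truesize);	/* 1728 + 320 = 2048 */
	return 0;
}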
+
/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
@@ -583,7 +685,8 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
+ if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
+ !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -803,7 +906,7 @@ static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
+ u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
@@ -918,7 +1021,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -981,7 +1084,7 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* Returns amount of work completed
*/
-static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
@@ -991,6 +1094,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
bool failure;
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+#endif
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1020,6 +1127,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
+ ice_put_rx_buf(rx_ring, NULL);
+ cleaned_count++;
+ continue;
+ }
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
@@ -1038,6 +1151,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
xdp.data_meta = xdp.data;
xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+#endif
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
@@ -1051,16 +1168,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
- size);
-#endif
xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
} else {
rx_buf->pagecnt_bias++;
}
@@ -1528,7 +1637,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
- budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
@@ -1664,7 +1773,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, max_data, td_tag);
+ ice_build_ctob(td_cmd, td_offset, max_data,
+ td_tag);
tx_desc++;
i++;
@@ -1684,8 +1794,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len))
break;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1716,8 +1826,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
- td_tag);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -1791,12 +1901,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- if (skb->encapsulation)
- return -1;
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+ if (skb->encapsulation) {
+ bool gso_ena = false;
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+ tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+ ICE_TX_CTX_EIPT_IPV4 :
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+ l4_proto = ip.v4->protocol;
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+ tunnel |= ICE_TX_CTX_EIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ ICE_TXD_CTX_QW0_EIPLEN_S;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ ICE_TXD_CTX_QW0_NATLEN_S;
+
+ gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+ /* indicate if we need to offload outer UDP header */
+ if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunnel_params |= tunnel;
+
+ /* set DTYP=1 to indicate that it's a Tx context descriptor
+ * in IPsec tunnel mode with Tx offloads in Quad word 1
+ */
+ off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ first->tx_flags |= ICE_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ first->tx_flags |= ICE_TX_FLAGS_IPV6;
+ }
/* Enable IP checksum offloads */
- protocol = vlan_get_protocol(skb);
- if (protocol == htons(ETH_P_IP)) {
+ if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
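The tunnel context fields computed in the encapsulation block above are word counts, not byte counts: the outer L3 header length is stored in 4-byte units (EIPLEN) and the stretch from the outer L4 header to the inner IP header in 2-byte units (NATLEN). A worked example for a VXLAN-style frame (the usual IPv4/UDP/VXLAN/Ethernet header sizes are assumed):

#include <stdio.h>

int main(void)
{
	unsigned int outer_ip = 20;		/* IPv4, no options */
	unsigned int outer_udp = 8, vxlan = 8;	/* encap headers */
	unsigned int inner_mac = 14;		/* inner Ethernet */

	/* EIPLEN: outer L3 header length in 4-byte words */
	printf("EIPLEN = %u\n", outer_ip / 4);		/* 5 */

	/* NATLEN: outer L4 start up to the inner IP header,
	 * in 2-byte words
	 */
	printf("NATLEN = %u\n", (outer_udp + vxlan + inner_mac) / 2);
	return 0;
}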
@@ -1806,7 +1998,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
else
cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
- } else if (protocol == htons(ETH_P_IPV6)) {
+ } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
@@ -1861,49 +2053,25 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
*
* Checks the skb and set up correspondingly several generic transmit flags
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
- *
- * Returns error code indicate the frame should be dropped upon error and the
- * otherwise returns 0 to indicate the flags has been set properly.
*/
-static int
+static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
- __be16 protocol = skb->protocol;
-
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* when HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- return 0;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
+ /* nothing left to do, software offloaded VLAN */
+ if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
+ return;
+
+ /* currently, we always assume 802.1Q for VLAN insertion as VLAN
+ * insertion for 802.1AD is not supported
+ */
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- /* for SW VLAN, check the next protocol and store the tag */
- vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
- sizeof(_vhdr),
- &_vhdr);
- if (!vhdr)
- return -EINVAL;
-
- first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
- ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
- return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
+ ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**
@@ -1928,7 +2096,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
- u32 paylen, l4_start;
+ u32 paylen;
+ u8 l4_start;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1953,8 +2122,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ip.v6->payload_len = 0;
}
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
+ /* determine offset of outer transport header */
+ l4_start = (u8)(l4.hdr - skb->data);
+
+ /* remove payload length from outer checksum */
+ paylen = skb->len - l4_start;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
+
+ /* reset pointers to inner headers */
+
+ /* cppcheck-suppress unreadVariable */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
/* determine offset of transport header */
- l4_start = l4.hdr - skb->data;
+ l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from checksum */
paylen = skb->len - l4_start;
@@ -1963,12 +2166,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
- off->header_len = sizeof(l4.udp) + l4_start;
+ off->header_len = (u8)sizeof(*l4.udp) + l4_start;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
- off->header_len = (l4.tcp->doff * 4) + l4_start;
+ off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
}
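header_len is the complete segmentation header, i.e. everything up to and including the L4 header, which the hardware replays in front of every segment. A worked standalone check for a plain TCP frame (typical header sizes assumed):

#include <stdio.h>

int main(void)
{
	unsigned int l4_start = 14 + 20;	/* Ethernet + IPv4 */
	unsigned int tcp_doff = 5;		/* TCP doff, 4-byte words */

	/* mirrors off->header_len = (l4.tcp->doff * 4) + l4_start */
	printf("header_len = %u\n", tcp_doff * 4 + l4_start);	/* 54 */
	return 0;
}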
/* update gso_segs and bytecount */
@@ -2176,8 +2379,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
first->tx_flags = 0;
/* prepare the VLAN tagging flags for Tx */
- if (ice_tx_prepare_vlan_flags(tx_ring, first))
- goto out_drop;
+ ice_tx_prepare_vlan_flags(tx_ring, first);
/* set up TSO offload */
tso = ice_tso(first, &offload);
@@ -2199,7 +2401,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
- int i = tx_ring->next_to_use;
+ u16 i = tx_ring->next_to_use;
/* grab the next descriptor */
cdesc = ICE_TX_CTX_DESC(tx_ring, i);
@@ -2244,3 +2446,86 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return ice_xmit_frame_ring(skb, tx_ring);
}
+
+/**
+ * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
+ * @tx_ring: tx_ring to clean
+ */
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+{
+ struct ice_vsi *vsi = tx_ring->vsi;
+ s16 i = tx_ring->next_to_clean;
+ int budget = ICE_DFLT_IRQ_WORK;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+
+ tx_buf = &tx_ring->tx_buf[i];
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no pending work */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past filter desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap the data header */
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ devm_kfree(tx_ring->dev, tx_buf->raw_buf);
+
+ /* reset the buffer and descriptor state */
+ tx_buf->raw_buf = NULL;
+ tx_buf->tx_flags = 0;
+ tx_buf->next_to_watch = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_desc->buf_addr = 0;
+ tx_desc->cmd_type_offset_bsz = 0;
+
+ /* move past eop_desc for start of next FD desc */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ /* re-enable interrupt if needed */
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
+}
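The loop above uses the classic Intel-driver indexing trick: i is biased into [-count, 0) so the wrap test is a cheap `!i` check after each increment instead of a compare against the ring size, and the bias is removed before storing next_to_clean. A compact standalone model of the walk (ring size and start index are arbitrary assumptions):

#include <stdio.h>

int main(void)
{
	const short count = 4;		/* assumed ring size */
	short i = 2;			/* assumed next_to_clean */
	int steps = 5;			/* descriptors to visit */

	i -= count;			/* bias into [-count, 0) */
	while (steps--) {
		i++;
		if (!i)			/* wrapped past the end */
			i -= count;
		printf("slot %d\n", i + count);	/* real ring index */
	}
	i += count;			/* remove the bias */
	printf("next_to_clean = %d\n", i);
	return 0;
}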
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 7ee00a128663..e70c4619edc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -38,7 +38,8 @@
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+ ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
+ SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
/**
* ice_compute_pad - compute the padding
@@ -107,12 +108,19 @@ static inline int ice_skb_pad(void)
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
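The macro reports free descriptors while always keeping one slot unused, so a completely full ring remains distinguishable from an empty one; worked numbers for the expression (ring size assumed):

#include <stdio.h>

static unsigned short desc_unused(unsigned short ntc, unsigned short ntu,
				  unsigned short count)
{
	/* mirrors ICE_DESC_UNUSED: add count only when ntc trails ntu */
	return (unsigned short)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
}

int main(void)
{
	printf("%u\n", desc_unused(0, 0, 8));	/* empty ring: 7 free */
	printf("%u\n", desc_unused(5, 2, 8));	/* ntc ahead: 2 free */
	printf("%u\n", desc_unused(2, 5, 8));	/* wrapped: 4 free */
	return 0;
}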
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
+/* ICE_TX_FLAGS_DUMMY_PKT marks dummy packets whose buffers should be
+ * freed rather than returned to the stack like skb packets.
+ */
+#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_IPV4 BIT(5)
+#define ICE_TX_FLAGS_IPV6 BIT(6)
+#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
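The VLAN masks split tx_flags into a low 16-bit flag space and a high 16-bit TCI whose top three bits are the priority; a standalone pack/unpack check (the shift of 16 is inferred from the 0xffff0000 mask, mirroring ICE_TX_FLAGS_VLAN_S as used in ice_tx_prepare_vlan_flags()):

#include <stdio.h>
#include <stdint.h>

#define VLAN_S		16		/* inferred from the mask below */
#define VLAN_M		0xffff0000u
#define VLAN_PR_M	0xe0000000u
#define VLAN_PR_S	29

int main(void)
{
	uint16_t tci = (3u << 13) | 100;	/* priority 3, VLAN ID 100 */
	uint32_t tx_flags = (uint32_t)tci << VLAN_S;

	printf("tci  = 0x%04x\n", (unsigned)((tx_flags & VLAN_M) >> VLAN_S));
	printf("prio = %u\n", (unsigned)((tx_flags & VLAN_PR_M) >> VLAN_PR_S));
	return 0;
}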
@@ -155,17 +163,16 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- struct sk_buff *skb;
- dma_addr_t dma;
union {
struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
struct page *page;
unsigned int page_offset;
u16 pagecnt_bias;
};
struct {
- void *addr;
- u64 handle;
+ struct xdp_buff *xdp;
};
};
};
@@ -289,7 +296,6 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
@@ -373,5 +379,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
-
+int
+ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
+ u8 *raw_packet);
+int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 6da048a6ca7c..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -8,7 +8,7 @@
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -84,11 +84,11 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
+ u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
decoded = ice_decode_rx_desc_ptype(ptype);
@@ -101,7 +101,7 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
return;
/* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
if (!(decoded.known && decoded.outer_ip))
@@ -112,19 +112,31 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+
+ if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
goto checksum_fail;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* check for outer UDP checksum error in tunneled packets */
+ if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+ (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
goto checksum_fail;
+ /* If an outer header is present that might contain a checksum, bump
+ * the checksum level by 1 to indicate that the checksum we validated
+ * is the inner one.
+ */
+ if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
@@ -215,8 +227,8 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
- size, 0);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -242,7 +254,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*/
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return ICE_XDP_CONSUMED;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index ba9164dad9ae..58ff58f0f972 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
}
static inline __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
(td_cmd << ICE_TXD_QW1_CMD_S) |
@@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4ce5f92fca4a..c1ad8622e65c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -118,7 +118,8 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
- ICE_VSI_VF,
+ ICE_VSI_VF = 1,
+ ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_LB = 6,
};
@@ -161,6 +162,38 @@ struct ice_phy_info {
u8 get_link_info;
};
+/* protocol enumeration for filters */
+enum ice_fltr_ptype {
+ /* NONE - used for undef/error */
+ ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+ ICE_FLTR_PTYPE_FRAG_IPV4,
+ ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV6_SCTP,
+ ICE_FLTR_PTYPE_NONF_IPV6_OTHER,
+ ICE_FLTR_PTYPE_MAX,
+};
+
+enum ice_fd_hw_seg {
+ ICE_FD_HW_SEG_NON_TUN = 0,
+ ICE_FD_HW_SEG_TUN,
+ ICE_FD_HW_SEG_MAX,
+};
+
+/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
+#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+
+struct ice_fd_hw_prof {
+ struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
+ int cnt;
+ u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
+ u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+};
+
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
u32 valid_functions;
@@ -197,6 +230,8 @@ struct ice_hw_func_caps {
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
u32 guar_num_vsi;
+ u32 fd_fltr_guar; /* Number of filters guaranteed */
+ u32 fd_fltr_best_effort; /* Number of best effort filters */
};
/* Device wide capabilities */
@@ -204,6 +239,7 @@ struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
u32 num_funcs;
};
@@ -259,6 +295,16 @@ struct ice_nvm_info {
#define ICE_NVM_VER_LEN 32
+/* netlist version information */
+struct ice_netlist_ver_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -479,6 +525,8 @@ struct ice_hw {
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
+ u16 fd_ctr_base; /* FD counter base index */
+
/* pci info */
u16 device_id;
u16 vendor_id;
@@ -491,8 +539,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
/* Tx Scheduler values */
- u16 num_tx_sched_layers;
- u16 num_tx_sched_phys_layers;
+ u8 num_tx_sched_layers;
+ u8 num_tx_sched_phys_layers;
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
@@ -506,6 +554,7 @@ struct ice_hw {
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
+ struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
@@ -548,6 +597,7 @@ struct ice_hw {
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
+ u32 active_track_id;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -568,10 +618,29 @@ struct ice_hw {
u8 *pkg_copy;
u32 pkg_size;
+ /* tunneling info */
+ struct mutex tnl_lock;
+ struct ice_tunnel_table tnl;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
struct list_head fl_profs[ICE_BLK_COUNT];
+
+ /* Flow Director filter info */
+ int fdir_active_fltr;
+
+ struct mutex fdir_fltr_lock; /* protect Flow Director */
+ struct list_head fdir_list_head;
+
+ /* Book-keeping of side-band filter count per flow-type.
+ * This is used to detect and handle input set changes for
+ * respective flow-type.
+ */
+ u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX];
+
+ struct ice_fd_hw_prof **fdir_prof;
+ DECLARE_BITMAP(fdir_perfect_fltr, ICE_FLTR_PTYPE_MAX);
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
};
@@ -592,6 +661,8 @@ struct ice_eth_stats {
u64 tx_errors; /* tepc */
};
+#define ICE_MAX_UP 8
+
/* Statistics collected by the MAC */
struct ice_hw_port_stats {
/* eth stats collected by the port */
@@ -631,6 +702,9 @@ struct ice_hw_port_stats {
u64 tx_size_1023; /* ptc1023 */
u64 tx_size_1522; /* ptc1522 */
u64 tx_size_big; /* ptc9522 */
+ /* flow director stats */
+ u32 fd_sb_status;
+ u64 fd_sb_match;
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 15191a325918..16a2f2526ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -4,16 +4,18 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
+#include "ice_fltr.h"
/**
* ice_validate_vf_id - helper to check if VF ID is valid
* @pf: pointer to the PF structure
* @vf_id: the ID of the VF to check
*/
-static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
+static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
+ /* vf_id is only valid in the range 0-255 and should always be unsigned */
if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
+ dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
return -EINVAL;
}
return 0;
@@ -27,7 +29,7 @@ static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
vf->vf_id);
return -EBUSY;
}
@@ -35,6 +37,37 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -47,7 +80,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- int i;
+ unsigned int i;
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
@@ -149,6 +182,26 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+static void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: VF whose VSI will be freed and then invalidated
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -163,10 +216,8 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
/* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx) {
- ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
- vf->lan_vsi_idx = 0;
- vf->lan_vsi_num = 0;
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
vf->num_mac = 0;
}
@@ -292,7 +343,7 @@ void ice_free_vfs(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int tmp, i;
+ unsigned int tmp, i;
if (!pf->vf)
return;
@@ -337,7 +388,7 @@ void ice_free_vfs(struct ice_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
- int vf_id;
+ unsigned int vf_id;
/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
@@ -368,9 +419,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
struct device *dev;
struct ice_hw *hw;
- int vf_abs_id, i;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
@@ -380,10 +431,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
- * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
- * It's normally disabled in ice_free_vf_res(), but it's safer
- * to do it earlier to give some time to finish to any VF config
- * functions that may still be running at this point.
+ * when it's safe again to access VF's VSI.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
@@ -418,7 +466,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -460,8 +508,9 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
+ ice_stat_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
@@ -475,18 +524,39 @@ out:
}
/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+/**
* ice_vf_vsi_setup - Set up a VF VSI
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vf_id: defines VF ID to which this VSI connects.
+ * @vf: VF to setup VSI for
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
*/
-static struct ice_vsi *
-ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
}
/**
@@ -507,170 +577,158 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_alloc_vsi_res - Setup VF VSI and its resources
- * @vf: pointer to the VF structure
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
*
- * Returns 0 on success, negative value on failure
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
*/
-static int ice_alloc_vsi_res(struct ice_vf *vf)
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- LIST_HEAD(tmp_add_list);
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int status = 0;
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u16 vlan_id = 0;
+ int err;
- dev = ice_pf_to_dev(pf);
- /* first vector index is the VFs OICR index */
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ if (vf->port_vlan_info) {
+ err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
- vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
- if (!vsi) {
- dev_err(dev, "Failed to create VF VSI\n");
- return -ENOMEM;
+ vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
}
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- /* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_info) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
- dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
- vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
- } else {
- /* set VLAN 0 filter by default when no port VLAN is
- * enabled. If a port VLAN is enabled we don't want
- * untagged broadcast/multicast traffic seen on the VF
- * interface.
- */
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
- vf->vf_id);
+ /* vlan_id will either be 0 or the port VLAN number */
+ err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
+ vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
+ err);
+ return err;
}
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ enum ice_status status;
+ u8 broadcast[ETH_ALEN];
+
eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
+ vf->vf_id, ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ vf->num_mac++;
if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vf->dflt_lan_addr.addr);
- if (status)
- goto ice_alloc_vsi_res_exit;
+ status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ &vf->dflt_lan_addr.addr[0], vf->vf_id,
+ ice_stat_str(status));
+ return ice_status_to_errno(status);
+ }
+ vf->num_mac++;
}
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status)
- dev_err(dev, "could not add mac filters error %d\n", status);
- else
- vf->num_mac = 1;
-
- /* Clear this bit after VF initialization since we shouldn't reclaim
- * and reassign interrupts for synchronous or asynchronous VFR events.
- * We don't want to reconfigure interrupts since AVF driver doesn't
- * expect vector assignment to be changed unless there is a request for
- * more vectors.
- */
-ice_alloc_vsi_res_exit:
- ice_free_fltr_list(dev, &tmp_add_list);
- return status;
+ return 0;
}
/**
- * ice_alloc_vf_res - Allocate VF resources
- * @vf: pointer to the VF structure
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
*/
-static int ice_alloc_vf_res(struct ice_vf *vf)
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- int tx_rx_queue_left;
- int status;
-
- /* Update number of VF queues, in case VF had requested for queue
- * changes
- */
- tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += pf->num_qps_per_vf;
- if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
- vf->num_req_qs != vf->num_vf_qs)
- vf->num_vf_qs = vf->num_req_qs;
-
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-
- /* VF is now completely initialized */
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- return status;
-
-ice_alloc_vf_res_exit:
- ice_free_vf_res(vf);
- return status;
}
/**
- * ice_ena_vf_mappings
- * @vf: pointer to the VF structure
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * Enable VF vectors and queues allocation by writing the details into
- * respective registers.
+ * Some of the registers must be indexed using device-global values, while
+ * other registers take 0-based values, which are relative to the PF.
*/
-static void ice_ena_vf_mappings(struct ice_vf *vf)
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
- int abs_vf_id, abs_first, abs_last;
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
+ int device_based_vf_id;
struct ice_hw *hw;
u32 reg;
- dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
- first = vf->first_vector_idx;
- last = (first + pf->num_msix_per_vf) - 1;
- abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_msix_per_vf) - 1;
- abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* VF Vector allocation */
- reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
- VPINT_ALLOC_VALID_M);
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->num_msix_per_vf) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
& VPINT_ALLOC_PCI_FIRST_M) |
- ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
- VPINT_ALLOC_PCI_VALID_M);
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
/* map the interrupts to its functions */
- for (v = first; v <= last; v++) {
- reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
GLINT_VECT2FUNC_VF_NUM_M) |
((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
GLINT_VECT2FUNC_PF_NUM_M));
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
- /* Map mailbox interrupt. We put an explicit 0 here to remind us that
- * VF admin queue interrupts will go to VF MSI-X vector 0.
- */
- wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
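To make the two index spaces concrete, a worked example with illustrative
capability values:

	/* Assume vf->first_vector_idx = 65, pf->num_msix_per_vf = 17 and
	 * msix_vector_first_id = 1:
	 *
	 * PF-based:     first = 65, last = 65 + 17 - 1 = 81
	 *               (GLINT_VECT2FUNC is indexed in this space)
	 * Device-based: first = 65 + 1 = 66, last = 66 + 17 - 1 = 82
	 *               (VPINT_ALLOC and VPINT_ALLOC_PCI take these)
	 */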
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -682,7 +740,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
VPLAN_TX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
@@ -700,7 +758,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
VPLAN_RX_QBASE_VFNUMQ_M));
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
@@ -709,6 +767,18 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
}
/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
* ice_determine_res
* @pf: pointer to the PF structure
* @avail_res: available resources in the PF structure
@@ -906,51 +976,18 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
}
/**
- * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
- * @vf: pointer to the VF structure
- *
- * Cleanup a VF after the hardware reset is finished. Expects the caller to
- * have verified whether the reset is finished properly, and ensure the
- * minimum amount of wait time has passed. Reallocate VF resources back to make
- * VF state active
+ * ice_clear_vf_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
*/
-static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vf->pf->hw;
u32 reg;
- hw = &pf->hw;
-
- /* PF software completes the flow by notifying VF that reset flow is
- * completed. This is done by enabling hardware by clearing the reset
- * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
- * register to VFR completed (done at the end of this function)
- * By doing this we allow HW to access VF memory at any point. If we
- * did it any sooner, HW could access memory while it was being freed
- * in ice_free_vf_res(), causing an IOMMU fault.
- *
- * On the other hand, this needs to be done ASAP, because the VF driver
- * is waiting for this to happen and may report a timeout. It's
- * harmless, but it gets logged into Guest OS kernel log, so best avoid
- * it.
- */
reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
reg &= ~VPGEN_VFRTRIG_VFSWR_M;
wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
-
- /* reallocate VF resources to finish resetting the VSI state */
- if (!ice_alloc_vf_res(vf)) {
- ice_ena_vf_mappings(vf);
- set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- }
-
- /* Tell the VF driver the reset is done. This needs to be done only
- * after VF has been fully initialized, because the VF driver may
- * request resources immediately after setting this flag.
- */
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ ice_flush(hw);
}
/**
@@ -994,44 +1031,124 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
return status;
}
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+ vf->num_mac = 0;
+ vsi->num_vlan = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
/**
- * ice_config_res_vfs - Finalize allocation of VFs resources in one go
- * @pf: pointer to the PF structure
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
*
- * This function is being called as last part of resetting all VFs, or when
- * configuring VFs for the first time, where there is no resource to be freed
- * Returns true if resources were properly allocated for all VFs, and false
- * otherwise.
+ * These are cheap per-VF tasks that don't need to be amortized, since this
+ * function is most likely called in a loop over all VFs in the
+ * reset_all_vfs() case.
*/
-static bool ice_config_res_vfs(struct ice_pf *pf)
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int v;
+ ice_vf_clear_counters(vf);
+ ice_clear_vf_reset_trigger(vf);
+}
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
- return false;
- }
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
- /* rearm global interrupts */
- if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
+ ice_vf_set_host_trust_cfg(vf);
- /* Finish resetting each VF and allocate resources */
- ice_for_each_vf(pf, v) {
- struct ice_vf *vf = &pf->vf[v];
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
- vf->num_vf_qs = pf->num_qps_per_vf;
- dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
- vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
+ if (ice_vf_rebuild_host_vlan_cfg(vf))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+}
+
+/**
+ * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
+{
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
}
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
+}
- return true;
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+static void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+}
+
+/**
+ * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+
+ ice_vf_rebuild_host_cfg(vf);
+
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
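The helpers above are designed to compose into the reset paths; a condensed
sketch of the single-VF sequence exactly as it is wired up in ice_reset_vf()
further below (the reset-all path substitutes ice_vf_rebuild_vsi()):

	ice_vf_pre_vsi_rebuild(vf);		/* clear counters, re-enable HW access */
	ice_vf_rebuild_vsi_with_release(vf);	/* free and re-create the VSI */
	ice_vf_post_vsi_rebuild(vf);		/* host cfg, mappings, VFR_VFACTIVE */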
/**
@@ -1065,17 +1182,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
- ice_for_each_vf(pf, v) {
- struct ice_vsi *vsi;
-
- vf = &pf->vf[v];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
- ice_dis_vf_qs(vf);
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
- }
-
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
* sequence to make sure that it has completed. We'll keep track of
@@ -1112,21 +1218,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
- ice_free_vf_res(vf);
-
- /* Free VF queues as well, and reallocate later.
- * If a given VF has different number of queues
- * configured, the request for update will come
- * via mailbox communication.
- */
- vf->num_vf_qs = 0;
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ ice_vf_post_vsi_rebuild(vf);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- if (!ice_config_res_vfs(pf))
- return false;
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1238,12 +1336,9 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_err(dev, "disabling promiscuous mode failed\n");
}
- /* free VF resources to begin resetting the VSI state */
- ice_free_vf_res(vf);
-
- ice_cleanup_and_realloc_vf(vf);
-
- ice_flush(hw);
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi_with_release(vf);
+ ice_vf_post_vsi_rebuild(vf);
return true;
}
@@ -1311,16 +1406,144 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
}
/**
- * ice_alloc_vfs - Allocate and set up VFs resources
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
+ *
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
+ */
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ enum ice_status status;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
+ vf->vf_id, ice_stat_str(status));
+ err = ice_status_to_errno(status);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int retval, i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_clear_vf_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ }
+
+ return retval;
+}
+
+/**
+ * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
+ * @pf: PF holding reference to all VFs for default configuration
+ */
+static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ vf->pf = pf;
+ vf->vf_id = i;
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->num_qps_per_vf;
+ }
+}
+
+/**
+ * ice_alloc_vfs - allocate num_vfs in the PF structure
+ * @pf: PF to store the allocated VFs in
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
+{
+ struct ice_vf *vfs;
+
+ vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs)
+ return -ENOMEM;
+
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_vfs;
+
+ return 0;
+}
+
+/**
+ * ice_ena_vfs - enable VFs so they are ready to be used
* @pf: pointer to the PF structure
- * @num_alloc_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to enable
*/
-static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct ice_vf *vfs;
- int i, ret;
+ int ret;
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
@@ -1328,43 +1551,37 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
if (ret) {
pf->num_alloc_vfs = 0;
goto err_unroll_intr;
}
- /* allocate memory */
- vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
- if (!vfs) {
- ret = -ENOMEM;
- goto err_pci_disable_sriov;
- }
- pf->vf = vfs;
- pf->num_alloc_vfs = num_alloc_vfs;
- /* apply default profile */
- ice_for_each_vf(pf, i) {
- vfs[i].pf = pf;
- vfs[i].vf_sw_id = pf->first_sw;
- vfs[i].vf_id = i;
+ ret = ice_alloc_vfs(pf, num_vfs);
+ if (ret)
+ goto err_pci_disable_sriov;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
- vfs[i].spoofchk = true;
+ if (ice_set_per_vf_res(pf)) {
+ dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
+ num_vfs);
+ ret = -ENOSPC;
+ goto err_unroll_sriov;
}
- /* VF resources get allocated with initialization */
- if (!ice_config_res_vfs(pf)) {
- ret = -EIO;
+ ice_set_dflt_settings_vfs(pf);
+
+ if (ice_start_vfs(pf)) {
+ dev_err(dev, "Failed to start VF(s)\n");
+ ret = -EAGAIN;
goto err_unroll_sriov;
}
- return ret;
+ clear_bit(__ICE_VF_DIS, pf->state);
+ return 0;
err_unroll_sriov:
+ devm_kfree(dev, pf->vf);
pf->vf = NULL;
- devm_kfree(dev, vfs);
- vfs = NULL;
pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
pci_disable_sriov(pf->pdev);
@@ -1404,6 +1621,8 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf)
* ice_pci_sriov_ena - Enable or change number of VFs
* @pf: pointer to the PF structure
* @num_vfs: number of VFs to allocate
+ *
+ * Returns 0 on success and negative on failure
*/
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
@@ -1411,20 +1630,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
struct device *dev = ice_pf_to_dev(pf);
int err;
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
ice_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return num_vfs;
+ return 0;
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
@@ -1432,45 +1641,77 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
return -EOPNOTSUPP;
}
- dev_info(dev, "Allocating %d VFs\n", num_vfs);
- err = ice_alloc_vfs(pf, num_vfs);
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
if (err) {
dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
return err;
}
set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return num_vfs;
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed on this device
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/**
* ice_sriov_configure - Enable or change number of VFs via sysfs
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
*
- * This function is called when the user updates the number of VFs in sysfs.
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success, return the num_vfs requested by the caller; on failure, return a
+ * negative errno.
*/
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
- if (num_vfs)
- return ice_pci_sriov_ena(pf, num_vfs);
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ return 0;
+ }
- if (!pci_vfs_assigned(pdev)) {
- ice_free_vfs(pf);
- } else {
dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
}
- return 0;
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
}
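For reference, this callback backs the standard sriov_numvfs sysfs attribute,
so writing e.g. "echo 4 > /sys/class/net/<iface>/device/sriov_numvfs" arrives
here with num_vfs == 4 and now returns 4 on success, while "echo 0" takes the
free path and returns 0.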
/**
@@ -1483,7 +1724,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
void ice_process_vflr_event(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- int vf_id;
+ unsigned int vf_id;
u32 reg;
if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
@@ -1524,7 +1765,7 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
*/
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
- int vf_id;
+ unsigned int vf_id;
ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
@@ -1628,8 +1869,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
- vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
+ dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
+ vf->vf_id, ice_stat_str(aq_ret),
+ ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -1772,7 +2014,7 @@ err:
*/
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
ice_reset_vf(vf, false);
}
@@ -2044,8 +2286,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
+ ice_stat_str(status));
ret = -EIO;
goto out;
}
@@ -2060,6 +2303,174 @@ out:
}
/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ int vf_idx;
+
+ ice_for_each_vf(pf, vf_idx) {
+ struct ice_vf *vf = &pf->vf[vf_idx];
+
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ bool rm_promisc;
+ int ret = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Leave v_ret alone, lie to the VF on purpose. */
+ goto error_param;
+ }
+
+ rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
+ !(info->flags & FLAG_VF_MULTICAST_PROMISC);
+
+ if (vsi->num_vlan || vf->port_vlan_info) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+ struct net_device *pf_netdev;
+
+ if (!pf_vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ pf_netdev = pf_vsi->netdev;
+
+ ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
+ rm_promisc ? "ON" : "OFF", vf->vf_id,
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+ ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
+
+ if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
+ /* only attempt to set the default forwarding VSI if
+ * it's not currently set
+ */
+ ret = ice_set_dflt_vsi(pf->first_sw, vsi);
+ else if (!set_dflt_vsi &&
+ ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+ /* only attempt to free the default forwarding VSI if we
+ * are the owner
+ */
+ ret = ice_clear_dflt_vsi(pf->first_sw);
+
+ if (ret) {
+ dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
+ set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto error_param;
+ }
+ } else {
+ enum ice_status status;
+ u8 promisc_m;
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+ } else {
+ if (vf->port_vlan_info || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+ }
+
+ /* Configure multicast/unicast with or without VLAN promiscuous
+ * mode
+ */
+ status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
+ if (status) {
+ dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
+ rm_promisc ? "dis" : "en", vf->vf_id,
+ ice_stat_str(status));
+ v_ret = ice_err_to_virt_err(status);
+ goto error_param;
+ } else {
+ dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
+ rm_promisc ? "dis" : "en", vf->vf_id);
+ }
+ }
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ else
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_ret, NULL, 0);
+}
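For context, a minimal sketch of the request a VF driver might send to reach
this handler, using the virtchnl definitions the handler reads; the mailbox
send is a hypothetical VF-side helper, not a real ice/iavf symbol:

	struct virtchnl_promisc_info info = {
		.vsi_id	= vf_vsi_id,	/* VSI ID as known to the VF */
		.flags	= FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC,
	};

	/* hypothetical VF-side mailbox send */
	vf_send_msg(VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &info, sizeof(info));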
+
+/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2118,6 +2529,52 @@ static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
}
/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2177,6 +2634,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->rxq_ena);
}
@@ -2192,6 +2650,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->txq_ena))
continue;
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
set_bit(vf_q_id, vf->txq_ena);
}
@@ -2604,20 +3063,22 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EPERM;
}
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
+ status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
return -EEXIST;
} else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
- /* only set dflt_lan_addr once */
- if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
- is_unicast_ether_addr(mac_addr))
+ /* Set the default LAN address to the latest unicast MAC address added
+ * by the VF. The default LAN address is reported by the PF via
+ * ndo_get_vf_config.
+ */
+ if (is_unicast_ether_addr(mac_addr))
ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
vf->num_mac++;
@@ -2641,14 +3102,14 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
- status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
+ status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
- mac_addr, vf->vf_id, status);
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
+ mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
@@ -2834,7 +3295,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
u16 vlanprio;
@@ -2856,8 +3316,6 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
@@ -2870,44 +3328,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return 0;
}
- if (vlan_id || qos) {
- /* remove VLAN 0 filter set by default when transitioning from
- * no port VLAN to a port VLAN. No change to old port VLAN on
- * failure.
- */
- ret = ice_vsi_kill_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
- if (ret)
- return ret;
- } else {
- /* add VLAN 0 filter back when transitioning from port VLAN to
- * no port VLAN. No change to old port VLAN on failure.
- */
- ret = ice_vsi_add_vlan(vsi, 0);
- if (ret)
- return ret;
- ret = ice_vsi_manage_pvid(vsi, 0, false);
- if (ret)
- return ret;
- }
+ vf->port_vlan_info = vlanprio;
- if (vlan_id) {
+ if (vf->port_vlan_info)
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
- /* add VLAN filter for the port VLAN */
- ret = ice_vsi_add_vlan(vsi, vlan_id);
- if (ret)
- return ret;
- }
- /* remove old port VLAN filter with valid VLAN ID or QoS fields */
- if (vf->port_vlan_info)
- ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
-
- /* keep port VLAN information persistent on resets */
- vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
+ ice_vc_reset_vf(vf);
return 0;
}
@@ -2992,8 +3421,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
vlan_promisc = true;
if (add_v) {
@@ -3018,7 +3448,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- status = ice_vsi_add_vlan(vsi, vid);
+ status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3317,6 +3747,9 @@ error_handler:
case VIRTCHNL_OP_GET_STATS:
err = ice_vc_get_stats_msg(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ break;
case VIRTCHNL_OP_ADD_VLAN:
err = ice_vc_add_vlan_msg(vf, msg);
break;
@@ -3390,6 +3823,39 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
+ *
+ * Return true on the first/any match, else return false
+ */
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
+{
+ struct ice_sw_recipe *mac_recipe_list =
+ &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* protect MAC filter list access */
+
+ rule_head = &mac_recipe_list->filt_rules;
+ rule_lock = &mac_recipe_list->filt_rule_lock;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(list_itr, rule_head, list_entry) {
+ u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(existing_mac, umac)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3406,25 +3872,41 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ if (is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
vf = &pf->vf[vf_id];
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
+ return 0;
+
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
return ret;
- /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
- * flow will use the updated dflt_lan_addr and add a MAC filter
- * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
- * set the MAC address for this VF.
+ if (ice_unicast_mac_exists(pf, mac)) {
+ netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+ mac, vf_id, mac);
+ return -EINVAL;
+ }
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
- vf->pf_set_mac = true;
- netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
- vf_id, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
ice_vc_reset_vf(vf);
return 0;
@@ -3554,6 +4036,24 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
}
/**
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_events - print VFs malicious driver detect events
* @pf: pointer to the PF structure
*
@@ -3582,12 +4082,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
vf->mdd_rx_events.last_printed =
vf->mdd_rx_events.count;
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, hw->pf_id, i,
- vf->dflt_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
+ ice_print_vf_rx_mdd_event(vf);
}
/* only print Tx MDD event message if there are new events */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 3f9464269bd2..67aa9110fdd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -7,7 +7,10 @@
/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
-#define ICE_MAX_MACADDR_PER_VF 12
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
/* Malicious Driver Detection */
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
@@ -64,7 +67,7 @@ struct ice_mdd_vf_events {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF ID in the PF space */
+ u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
/* first vector index of this VF in the PF space */
int first_vector_idx;
@@ -128,9 +131,11 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -140,6 +145,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf);
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
+#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -219,5 +225,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8279db15e870..b6f928c9e9c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
@@ -280,28 +280,6 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_add_umem - add a UMEM region for XDP sockets
- * @vsi: VSI to which the UMEM will be added
- * @umem: pointer to a requested UMEM region
- * @qid: queue ID
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
-{
- int err;
-
- err = ice_xsk_alloc_umems(vsi);
- if (err)
- return err;
-
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
-
- return 0;
-}
-
-/**
* ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
* @vsi: VSI from which the UMEM will be removed
* @qid: Ring/qid associated with the UMEM
@@ -318,65 +296,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-/**
- * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
- * @vsi: VSI to map the UMEM region
- * @umem: UMEM to map
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
- PAGE_SIZE,
- DMA_BIDIRECTIONAL,
- ICE_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
- i);
- goto out_unmap;
- }
-
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (; i > 0; i--) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -EFAULT;
-}
-
-/**
- * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
- * @vsi: VSI from which the UMEM will be unmapped
- * @umem: UMEM to unmap
- */
-static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- unsigned int i;
-
- dev = ice_pf_to_dev(pf);
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
-
- umem->pages[i].dma = 0;
- }
-}
/**
* ice_xsk_umem_disable - disable a UMEM region
@@ -391,7 +310,7 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
!vsi->xsk_umems[qid])
return -EINVAL;
- ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
ice_xsk_remove_umem(vsi, qid);
return 0;
@@ -408,7 +327,6 @@ static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
static int
ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
- struct xdp_umem_fq_reuse *reuseq;
int err;
if (vsi->type != ICE_VSI_PF)
@@ -419,20 +337,18 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
if (vsi->xsk_umems && vsi->xsk_umems[qid])
return -EBUSY;
- reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = ice_xsk_umem_dma_map(vsi, umem);
- if (err)
- return err;
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
- err = ice_xsk_add_umem(vsi, umem, qid);
+ err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -484,137 +400,22 @@ xsk_umem_if_up:
}
/**
- * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
- * @zca: zero-copy allocator
- * @handle: Buffer handle
- */
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
-{
- struct ice_rx_buf *rx_buf;
- struct ice_ring *rx_ring;
- struct xdp_umem *umem;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(zca, struct ice_ring, zca);
- umem = rx_ring->xsk_umem;
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- mask = umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- rx_buf = &rx_ring->rx_buf[nta];
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = (u64)handle + umem->headroom;
-}
-
-/**
- * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the hot path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = rx_buf->addr;
- u64 handle, hr;
-
- if (addr) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += hr;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += hr;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-/**
- * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
- * @rx_ring: ring with an xdp_umem bound to it
- * @rx_buf: buffer to which xsk page address will be assigned
- *
- * This function allocates an Rx buffer in the slow path.
- * The buffer can come from fill queue or recycle queue.
- *
- * Returns true if an assignment was successful, false if not.
- */
-static __always_inline bool
-ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, headroom;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- handle &= umem->chunk_mask;
- headroom = umem->headroom + XDP_PACKET_HEADROOM;
-
- rx_buf->dma = xdp_umem_get_dma(umem, handle);
- rx_buf->dma += headroom;
-
- rx_buf->addr = xdp_umem_get_data(umem, handle);
- rx_buf->addr += headroom;
-
- rx_buf->handle = handle + umem->headroom;
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
* @count: The number of buffers to allocate
- * @alloc: the function pointer to call for allocation
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
* Returns false if all allocations were successful, true if any fail.
*/
-static bool
-ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *rx_buf;
bool ret = false;
+ dma_addr_t dma;
if (!count)
return false;
@@ -623,16 +424,14 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
rx_buf = &rx_ring->rx_buf[ntu];
do {
- if (!alloc(rx_ring, rx_buf)) {
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!rx_buf->xdp) {
ret = true;
break;
}
- dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
-
- rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
rx_desc++;
@@ -653,32 +452,6 @@ ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
}
/**
- * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_fast_zc);
-}
-
-/**
- * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
- * @rx_ring: Rx ring
- * @count: number of bufs to allocate
- *
- * Returns false on success, true on failure.
- */
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
-{
- return ice_alloc_rx_bufs_zc(rx_ring, count,
- ice_alloc_buf_slow_zc);
-}
-
-/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
@@ -692,76 +465,21 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
}
/**
- * ice_get_rx_buf_zc - Fetch the current Rx buffer
- * @rx_ring: Rx ring
- * @size: size of a buffer
- *
- * This function returns the current, received Rx buffer and does
- * DMA synchronization.
- *
- * Returns a pointer to the received Rx buffer.
- */
-static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
- size, DMA_BIDIRECTIONAL);
-
- return rx_buf;
-}
-
-/**
- * ice_reuse_rx_buf_zc - reuse an Rx buffer
- * @rx_ring: Rx ring
- * @old_buf: The buffer to recycle
- *
- * This function recycles a finished Rx buffer, and places it on the recycle
- * queue (next_to_alloc).
- */
-static void
-ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
- u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta++];
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- new_buf->dma = old_buf->dma & mask;
- new_buf->dma += hr;
-
- new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
- new_buf->addr += hr;
-
- new_buf->handle = old_buf->handle & mask;
- new_buf->handle += rx_ring->xsk_umem->headroom;
-
- old_buf->addr = NULL;
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @rx_buf: zero-copy Rx buffer
- * @xdp: XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end -
- xdp->data_hard_start;
+ unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
+ unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
+ unsigned int datasize_hard = rx_buf->xdp->data_end -
+ rx_buf->xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -769,13 +487,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
-
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
return skb;
}
@@ -802,7 +520,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
}
act = bpf_prog_run_xdp(xdp_prog, xdp);
- xdp->handle += xdp->data - xdp->data_hard_start;
switch (act) {
case XDP_PASS:
break;
@@ -842,9 +559,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
bool failure = false;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -856,8 +570,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
u8 rx_ptype;
if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
- cleaned_count);
+ failure |= ice_alloc_rx_bufs_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -878,25 +592,19 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = ice_get_rx_buf_zc(rx_ring, size);
- if (!rx_buf->addr)
- break;
- xdp.data = rx_buf->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = rx_buf->handle;
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ rx_buf->xdp->data_end = rx_buf->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
- xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
- rx_buf->addr = NULL;
- } else {
- ice_reuse_rx_buf_zc(rx_ring, rx_buf);
- }
+ else
+ xsk_buff_free(rx_buf->xdp);
+ rx_buf->xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -906,7 +614,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ skb = ice_construct_skb_zc(rx_ring, rx_buf);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -977,17 +685,16 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_buf->bytecount = desc.len;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
- 0, desc.len, 0);
+ tx_desc->cmd_type_offset_bsz =
+ ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
@@ -1163,11 +870,10 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (!rx_buf->addr)
+ if (!rx_buf->xdp)
continue;
- xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
- rx_buf->addr = NULL;
+ rx_buf->xdp = NULL;
}
}
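For orientation: the net effect of the ice_xsk.c changes above is that per-buffer DMA bookkeeping (handle masks, headroom offsets, the recycle queue) moves into the xsk_buff layer; the driver maps the umem once with xsk_buff_dma_map() and then only allocates, syncs, and frees xdp_buff objects. A minimal sketch of the resulting refill shape, assuming the v5.8-era helpers from <net/xdp_sock_drv.h> and deliberately simplified ring types (not the real ice structs):

#include <linux/kernel.h>
#include <net/xdp_sock_drv.h>

struct demo_ring {
	struct xdp_umem *xsk_umem;	/* bound and DMA-mapped at setup */
	struct xdp_buff **bufs;		/* one slot per descriptor */
	__le64 *pkt_addr;		/* stand-in for the Rx descriptor */
	u16 next_to_use, count;
};

static bool demo_refill_zc(struct demo_ring *r, u16 n)
{
	while (n--) {
		struct xdp_buff *xdp = xsk_buff_alloc(r->xsk_umem);

		if (!xdp)
			return true;	/* fill ring empty; caller retries */

		/* The pool already holds the mapping, so the old per-buffer
		 * dma_sync_single_range_for_device() call is gone. */
		r->pkt_addr[r->next_to_use] =
			cpu_to_le64(xsk_buff_xdp_get_dma(xdp));
		r->bufs[r->next_to_use] = xdp;
		if (++r->next_to_use == r->count)
			r->next_to_use = 0;
	}
	return false;
}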
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8a4ba7c6d549..fc1a06b4df36 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -10,11 +10,10 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
@@ -27,12 +26,6 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
return -EOPNOTSUPP;
}
-static inline void
-ice_zca_free(struct zero_copy_allocator __always_unused *zca,
- unsigned long __always_unused handle)
-{
-}
-
static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
int __always_unused budget)
@@ -48,8 +41,8 @@ ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
}
static inline bool
-ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
- u16 __always_unused count)
+ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
{
return false;
}
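The header change keeps the usual dual-build idiom: real prototypes when CONFIG_XDP_SOCKETS is set, static-inline stubs otherwise, so callers compile without #ifdefs either way. The idiom in miniature, with generic names rather than the ice API:

#ifdef CONFIG_XDP_SOCKETS
bool demo_alloc_rx_bufs_zc(struct demo_ring *rx_ring, u16 count);
#else
static inline bool
demo_alloc_rx_bufs_zc(struct demo_ring *rx_ring, u16 count)
{
	return false;	/* report "no failure": ZC path never runs here */
}
#endif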
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 79ee0a747260..3254737c07a3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -12,7 +12,7 @@
#include "igb.h"
static s32 igb_set_default_fc(struct e1000_hw *hw);
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+static void igb_set_fc_watermarks(struct e1000_hw *hw);
/**
* igb_get_bus_info_pcie - Get PCIe bus information
@@ -687,7 +687,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
wr32(E1000_FCTTV, hw->fc.pause_time);
- ret_val = igb_set_fc_watermarks(hw);
+ igb_set_fc_watermarks(hw);
out:
@@ -723,9 +723,8 @@ void igb_config_collision_dist(struct e1000_hw *hw)
* flow control XON frame transmission is enabled, then set XON frame
* transmission as well.
**/
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+static void igb_set_fc_watermarks(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 fcrtl = 0, fcrth = 0;
/* Set the flow control receive threshold registers. Normally,
@@ -747,8 +746,6 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
}
wr32(E1000_FCRTL, fcrtl);
wr32(E1000_FCRTH, fcrth);
-
- return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 39d3b76a6f5d..2cd003c5ad43 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev,
u32 speed;
u32 supported, advertising;
- status = rd32(E1000_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
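The motivation for the igb_ethtool.c hunk: reading E1000_STATUS through the BAR while the adapter is runtime-suspended touches powered-down MMIO, so the read is now skipped and link state reported as down. The guard in isolation, as a hypothetical helper rather than igb code:

#include <linux/pm_runtime.h>
#include <linux/io.h>

/* Return 0 ("no link") instead of faulting when the device may be in D3. */
static u32 demo_read_status(struct device *dev, void __iomem *status_reg)
{
	if (pm_runtime_suspended(dev))
		return 0;
	return readl(status_reg);
}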
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index e3c164c12e10..1c3051db9085 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o igc_dump.o
+igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index a1f845a2aa80..5dbc5a156626 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -16,14 +16,210 @@
#include "igc_hw.h"
-/* forward declaration */
-void igc_set_ethtool_ops(struct net_device *);
+void igc_ethtool_set_ops(struct net_device *);
-struct igc_adapter;
-struct igc_ring;
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+#define MAX_ETYPE_FILTER 8
+#define IGC_RETA_SIZE 128
+
+enum igc_mac_filter_type {
+ IGC_MAC_FILTER_TYPE_DST = 0,
+ IGC_MAC_FILTER_TYPE_SRC
+};
+
+struct igc_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
+struct igc_ring_container {
+ struct igc_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igc_ring {
+ struct igc_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igc_tx_buffer *tx_buffer_info;
+ struct igc_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring */
+ u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
+
+ u32 start_time;
+ u32 end_time;
+
+ /* everything past this point are written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igc_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igc_rx_queue_stats rx_stats;
+ struct igc_rx_packet_stats pkt_stats;
+ struct u64_stats_sync rx_syncp;
+ struct sk_buff *skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+/* Board specific private data structure */
+struct igc_adapter {
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+ unsigned int num_q_vectors;
+
+ struct msix_entry *msix_entries;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+
+ u8 port_num;
+
+ u8 __iomem *io_addr;
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+
+ u8 tx_timeout_factor;
+
+ int msg_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ ktime_t base_time;
+ ktime_t cycle_time;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ /* lock for statistics */
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in igc_hw.h */
+ struct igc_hw hw;
+ struct igc_hw_stats stats;
+
+ struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
+
+ u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* Any access to elements in nfc_rule_list is protected by the
+ * nfc_rule_lock.
+ */
+ struct mutex nfc_rule_lock;
+ struct list_head nfc_rule_list;
+ unsigned int nfc_rule_count;
+
+ u8 rss_indir_tbl[IGC_RETA_SIZE];
+
+ unsigned long link_check_timeout;
+ struct igc_info ei;
+
+ u32 test_icr;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
+ /* System time value lock */
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+};
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
+int igc_open(struct net_device *netdev);
+int igc_close(struct net_device *netdev);
int igc_setup_tx_resources(struct igc_ring *ring);
int igc_setup_rx_resources(struct igc_ring *ring);
void igc_free_tx_resources(struct igc_ring *ring);
@@ -36,10 +232,6 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
/* igc_dump declarations */
@@ -50,14 +242,10 @@ extern char igc_driver_name[];
extern char igc_driver_version[];
#define IGC_REGS_LEN 740
-#define IGC_RETA_SIZE 128
/* flags controlling PTP/1588 function */
#define IGC_PTP_ENABLED BIT(0)
-/* Interrupt defines */
-#define IGC_START_ITR 648 /* ~6000 ints/sec */
-
/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
@@ -70,6 +258,7 @@ extern char igc_driver_version[];
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -78,6 +267,7 @@ extern char igc_driver_version[];
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -99,13 +289,6 @@ extern char igc_driver_version[];
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096
-/* Transmit and receive queues */
-#define IGC_MAX_RX_QUEUES 4
-#define IGC_MAX_TX_QUEUES 4
-
-#define MAX_Q_VECTORS 8
-#define MAX_STD_JUMBO_FRAME_SIZE 9216
-
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
@@ -232,83 +415,6 @@ struct igc_rx_buffer {
__u16 pagecnt_bias;
};
-struct igc_tx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 restart_queue;
- u64 restart_queue2;
-};
-
-struct igc_rx_queue_stats {
- u64 packets;
- u64 bytes;
- u64 drops;
- u64 csum_err;
- u64 alloc_failed;
-};
-
-struct igc_rx_packet_stats {
- u64 ipv4_packets; /* IPv4 headers processed */
- u64 ipv4e_packets; /* IPv4E headers with extensions processed */
- u64 ipv6_packets; /* IPv6 headers processed */
- u64 ipv6e_packets; /* IPv6E headers with extensions processed */
- u64 tcp_packets; /* TCP headers processed */
- u64 udp_packets; /* UDP headers processed */
- u64 sctp_packets; /* SCTP headers processed */
- u64 nfs_packets; /* NFS headers processed */
- u64 other_packets;
-};
-
-struct igc_ring_container {
- struct igc_ring *ring; /* pointer to linked list of rings */
- unsigned int total_bytes; /* total bytes processed this int */
- unsigned int total_packets; /* total packets processed this int */
- u16 work_limit; /* total work allowed per interrupt */
- u8 count; /* total number of rings in vector */
- u8 itr; /* current ITR setting for ring */
-};
-
-struct igc_ring {
- struct igc_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device for dma mapping */
- union { /* array of buffer info structs */
- struct igc_tx_buffer *tx_buffer_info;
- struct igc_rx_buffer *rx_buffer_info;
- };
- void *desc; /* descriptor ring memory */
- unsigned long flags; /* ring specific flags */
- void __iomem *tail; /* pointer to ring tail register */
- dma_addr_t dma; /* phys address of the ring */
- unsigned int size; /* length of desc. ring in bytes */
-
- u16 count; /* number of desc. in the ring */
- u8 queue_index; /* logical index of the ring */
- u8 reg_idx; /* physical index of the ring */
- bool launchtime_enable; /* true if LaunchTime is enabled */
-
- /* everything past this point are written often */
- u16 next_to_clean;
- u16 next_to_use;
- u16 next_to_alloc;
-
- union {
- /* TX */
- struct {
- struct igc_tx_queue_stats tx_stats;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync tx_syncp2;
- };
- /* RX */
- struct {
- struct igc_rx_queue_stats rx_stats;
- struct igc_rx_packet_stats pkt_stats;
- struct u64_stats_sync rx_syncp;
- struct sk_buff *skb;
- };
- };
-} ____cacheline_internodealigned_in_smp;
-
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
@@ -329,8 +435,6 @@ struct igc_q_vector {
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
-#define MAX_ETYPE_FILTER (4 - 1)
-
enum igc_filter_match_flags {
IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
IGC_FILTER_FLAG_VLAN_TCI = 0x2,
@@ -338,143 +442,25 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};
-/* RX network flow classification data structure */
-struct igc_nfc_input {
- /* Byte layout in order, all values with MSB first:
- * match_flags - 1 byte
- * etype - 2 bytes
- * vlan_tci - 2 bytes
- */
+struct igc_nfc_filter {
u8 match_flags;
- __be16 etype;
- __be16 vlan_tci;
+ u16 etype;
+ u16 vlan_tci;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
};
-struct igc_nfc_filter {
- struct hlist_node nfc_node;
- struct igc_nfc_input filter;
- unsigned long cookie;
- u16 etype_reg_index;
- u16 sw_idx;
+struct igc_nfc_rule {
+ struct list_head list;
+ struct igc_nfc_filter filter;
+ u32 location;
u16 action;
};
-struct igc_mac_addr {
- u8 addr[ETH_ALEN];
- u8 queue;
- u8 state; /* bitmask */
-};
-
-#define IGC_MAC_STATE_DEFAULT 0x1
-#define IGC_MAC_STATE_IN_USE 0x2
-#define IGC_MAC_STATE_SRC_ADDR 0x4
-#define IGC_MAC_STATE_QUEUE_STEERING 0x8
-
-#define IGC_MAX_RXNFC_FILTERS 16
-
-/* Board specific private data structure */
-struct igc_adapter {
- struct net_device *netdev;
-
- unsigned long state;
- unsigned int flags;
- unsigned int num_q_vectors;
-
- struct msix_entry *msix_entries;
-
- /* TX */
- u16 tx_work_limit;
- u32 tx_timeout_count;
- int num_tx_queues;
- struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
-
- /* RX */
- int num_rx_queues;
- struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
-
- struct timer_list watchdog_timer;
- struct timer_list dma_err_timer;
- struct timer_list phy_info_timer;
-
- u32 wol;
- u32 en_mng_pt;
- u16 link_speed;
- u16 link_duplex;
-
- u8 port_num;
-
- u8 __iomem *io_addr;
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
-
- struct work_struct reset_task;
- struct work_struct watchdog_task;
- struct work_struct dma_err_task;
- bool fc_autoneg;
-
- u8 tx_timeout_factor;
-
- int msg_enable;
- u32 max_frame_size;
- u32 min_frame_size;
-
- /* OS defined structs */
- struct pci_dev *pdev;
- /* lock for statistics */
- spinlock_t stats64_lock;
- struct rtnl_link_stats64 stats64;
-
- /* structs defined in igc_hw.h */
- struct igc_hw hw;
- struct igc_hw_stats stats;
-
- struct igc_q_vector *q_vector[MAX_Q_VECTORS];
- u32 eims_enable_mask;
- u32 eims_other;
-
- u16 tx_ring_count;
- u16 rx_ring_count;
-
- u32 tx_hwtstamp_timeouts;
- u32 tx_hwtstamp_skipped;
- u32 rx_hwtstamp_cleared;
-
- u32 rss_queues;
- u32 rss_indir_tbl_init;
-
- /* RX network flow classification support */
- struct hlist_head nfc_filter_list;
- struct hlist_head cls_flower_list;
- unsigned int nfc_filter_count;
-
- /* lock for RX network flow classification filter */
- spinlock_t nfc_lock;
- bool etype_bitmap[MAX_ETYPE_FILTER];
-
- struct igc_mac_addr *mac_table;
-
- u8 rss_indir_tbl[IGC_RETA_SIZE];
-
- unsigned long link_check_timeout;
- struct igc_info ei;
-
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info ptp_caps;
- struct work_struct ptp_tx_work;
- struct sk_buff *ptp_tx_skb;
- struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
- unsigned long last_rx_ptp_check;
- unsigned long last_rx_timestamp;
- unsigned int ptp_flags;
- /* System time value lock */
- spinlock_t tmreg_lock;
- struct cyclecounter cc;
- struct timecounter tc;
-};
+/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
+ * based, and 8 ethertype based.
+ */
+#define IGC_MAX_RXNFC_RULES 32
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
@@ -550,12 +536,11 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
return 0;
}
-/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
-int igc_add_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input);
-int igc_erase_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input);
+struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
+ u32 location);
+int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
+void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
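With the hlist/spinlock pair replaced by nfc_rule_list plus the nfc_rule_lock mutex, rule manipulation may sleep and funnels through igc_get_nfc_rule()/igc_add_nfc_rule()/igc_del_nfc_rule(). A hedged usage sketch; the location choice, locking placement, and error-path ownership here are assumptions, not the driver's actual ethtool path:

#include <linux/slab.h>

static int demo_add_etype_rule(struct igc_adapter *adapter, u16 etype,
			       u16 rx_queue, u32 location)
{
	struct igc_nfc_rule *rule;
	int err;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
	rule->filter.etype = etype;
	rule->action = rx_queue;
	rule->location = location;

	/* Assumption: list mutation requires nfc_rule_lock, per the
	 * comment on struct igc_adapter above. */
	mutex_lock(&adapter->nfc_rule_lock);
	err = igc_add_nfc_rule(adapter, rule);
	mutex_unlock(&adapter->nfc_rule_lock);

	if (err)
		kfree(rule);	/* assumed: caller keeps ownership on error */
	return err;
}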
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 5a506440560a..cc5a6cf531c7 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -26,7 +26,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
*/
ret_val = igc_disable_pcie_master(hw);
if (ret_val)
- hw_dbg("PCI-E Master disable polling has failed.\n");
+ hw_dbg("PCI-E Master disable polling has failed\n");
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
@@ -177,7 +177,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
*/
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
+ hw_dbg("Error resetting the PHY\n");
goto out;
}
@@ -212,6 +212,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_I:
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
@@ -364,7 +367,7 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
}
if (ms_wait == 10)
- pr_debug("Queue disable timed out after 10ms\n");
+ hw_dbg("Queue disable timed out after 10ms\n");
/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
* incoming packets are rejected. Set enable and wait 2ms so that
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 4ddccccf42cc..186deb1d9375 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -44,13 +44,9 @@
/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */
#define IGC_WUPM_BYTES 128
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
-#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
@@ -66,8 +62,14 @@
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
+#define IGC_RAH_RAH_MASK 0x0000FFFF
+#define IGC_RAH_ASEL_MASK 0x00030000
+#define IGC_RAH_ASEL_SRC_ADDR BIT(16)
+#define IGC_RAH_QSEL_MASK 0x000C0000
+#define IGC_RAH_QSEL_SHIFT 18
+#define IGC_RAH_QSEL_ENABLE BIT(28)
#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
-#define IGC_RAH_POOL_1 0x00040000
+
#define IGC_RAL_MAC_ADDR_LEN 4
#define IGC_RAH_MAC_ADDR_LEN 2
@@ -94,8 +96,6 @@
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CONNSW_AUTOSENSE_EN 0x1
-
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -166,11 +166,6 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
-
-#define NVM_PBA_OFFSET_0 8
-#define NVM_PBA_OFFSET_1 9
-#define NVM_RESERVED_WORD 0xFFFF
-#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* Collision related configuration parameters */
@@ -252,7 +247,6 @@
/* Interrupt Cause Set */
#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
-#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
@@ -271,21 +265,13 @@
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
-#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* IPSec Encrypt Enable */
@@ -377,6 +363,11 @@
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+
+#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
@@ -387,9 +378,6 @@
#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS
-/* PTP Queue Filter */
-#define IGC_ETQF_1588 BIT(30)
-
#define IGC_FTQF_VF_BP 0x00008000
#define IGC_FTQF_1588_TIME_STAMP 0x08000000
#define IGC_FTQF_MASK 0xF0000000
@@ -431,6 +419,14 @@
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
+#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
+#define IGC_TXQCTL_STRICT_END 0x00000004
+
/* Receive Checksum Control */
#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
@@ -497,16 +493,15 @@
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
-#define IGC_MDIC_DEST 0x80000000
#define IGC_N0_QUEUE -1
#define IGC_MAX_MAC_HDR_LEN 127
#define IGC_MAX_NETWORK_HDR_LEN 511
-#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
-#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
-#define IGC_VLAPQF_QUEUE_MASK 0x03
+#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLANPQF_QUEUE_MASK 0x03
#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */
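The new IGC_RAH_* masks describe the queue-steering fields of the Receive Address High registers. Composing a RAH value from a MAC address and target queue looks roughly like this; a sketch only, the driver's own filter code may sequence the writes differently:

/* addr: 6-byte MAC; queue: Rx queue to steer to; src: match source address */
static u32 demo_rah_value(const u8 *addr, u8 queue, bool src)
{
	u32 rah = addr[4] | (addr[5] << 8);	/* top two MAC bytes */

	rah |= (queue << IGC_RAH_QSEL_SHIFT) & IGC_RAH_QSEL_MASK;
	rah |= IGC_RAH_QSEL_ENABLE;		/* honor the QSEL field */
	if (src)
		rah |= IGC_RAH_ASEL_SRC_ADDR;	/* match source, not dest */

	return rah | IGC_RAH_AV;		/* mark the entry valid */
}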
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c
new file mode 100644
index 000000000000..cc621970c0cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_diag.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Intel Corporation */
+
+#include "igc.h"
+#include "igc_diag.h"
+
+static struct igc_reg_test reg_test[] = {
+ { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
+ { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB },
+ { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF },
+ { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { IGC_RA, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { IGC_RA, 16, TABLE64_TEST_HI,
+ 0x900FFFFF, 0xFFFFFFFF },
+ { IGC_MTA, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0}
+};
+
+static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 pat, val, before;
+ static const u32 test_pattern[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
+
+ for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+ before = rd32(reg);
+ wr32(reg, test_pattern[pat] & write);
+ val = rd32(reg);
+ if (val != (test_pattern[pat] & write & mask)) {
+ netdev_err(adapter->netdev,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X",
+ reg, val, test_pattern[pat] & write & mask);
+ *data = reg;
+ wr32(reg, before);
+ return false;
+ }
+ wr32(reg, before);
+ }
+ return true;
+}
+
+static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 val, before;
+
+ before = rd32(reg);
+ wr32(reg, write & mask);
+ val = rd32(reg);
+ if ((write & mask) != (val & mask)) {
+ netdev_err(adapter->netdev,
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X",
+ reg, (val & mask), (write & mask));
+ *data = reg;
+ wr32(reg, before);
+ return false;
+ }
+ wr32(reg, before);
+ return true;
+}
+
+bool igc_reg_test(struct igc_adapter *adapter, u64 *data)
+{
+ struct igc_reg_test *test = reg_test;
+ struct igc_hw *hw = &adapter->hw;
+ u32 value, before, after;
+ u32 i, toggle, b = false;
+
+ /* Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable.
+ */
+ toggle = 0x6800D3;
+ before = rd32(IGC_STATUS);
+ value = before & toggle;
+ wr32(IGC_STATUS, toggle);
+ after = rd32(IGC_STATUS) & toggle;
+ if (value != after) {
+ netdev_err(adapter->netdev,
+ "failed STATUS register test got: 0x%08X expected: 0x%08X",
+ after, value);
+ *data = 1;
+ return false;
+ }
+ /* restore previous status */
+ wr32(IGC_STATUS, before);
+
+ /* Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ b = reg_pattern_test(adapter, data,
+ test->reg + 4 + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE32_TEST:
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ }
+ if (!b)
+ return false;
+ }
+ test++;
+ }
+ *data = 0;
+ return true;
+}
+
+bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ *data = 0;
+
+ if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) {
+ *data = 1;
+ return false;
+ }
+
+ return true;
+}
+
+bool igc_link_test(struct igc_adapter *adapter, u64 *data)
+{
+ bool link_up;
+
+ *data = 0;
+
+ /* add delay to give enough time for autonegotiation to finish */
+ if (adapter->hw.mac.autoneg)
+ ssleep(5);
+
+ link_up = igc_has_link(adapter);
+ if (!link_up) {
+ *data = 1;
+ return false;
+ }
+
+ return true;
+}
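igc_reg_test(), igc_eeprom_test() and igc_link_test() all follow the same contract: false on failure, with a detail code stored through *data. They are shaped for an ethtool self-test handler; a hedged sketch of such a consumer (the real handler is wired up in igc_ethtool.c and may differ):

#include <linux/ethtool.h>

static void demo_self_test(struct net_device *netdev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_reg_test(adapter, &data[0]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
	if (!igc_eeprom_test(adapter, &data[1]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
	if (!igc_link_test(adapter, &data[2]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}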
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.h b/drivers/net/ethernet/intel/igc/igc_diag.h
new file mode 100644
index 000000000000..600658e33bec
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_diag.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+bool igc_reg_test(struct igc_adapter *adapter, u64 *data);
+bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data);
+bool igc_link_test(struct igc_adapter *adapter, u64 *data);
+
+struct igc_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define TABLE32_TEST 3
+#define TABLE64_TEST_LO 4
+#define TABLE64_TEST_HI 5
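The table encodes register layout implicitly: PATTERN_TEST and SET_READ_TEST entries index arrays spaced 0x40 apart, TABLE32_TEST entries step by 4 bytes, and TABLE64 pairs step by 8 with the HI word at +4. The stride logic that igc_reg_test() open-codes, factored into a hypothetical helper:

static u32 demo_test_reg_offset(const struct igc_reg_test *t, u32 i)
{
	switch (t->test_type) {
	case TABLE32_TEST:
		return t->reg + i * 4;
	case TABLE64_TEST_LO:
		return t->reg + i * 8;
	case TABLE64_TEST_HI:
		return t->reg + 4 + i * 8;
	default:	/* PATTERN_TEST and SET_READ_TEST arrays */
		return t->reg + i * 0x40;
	}
}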
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 657ab50ae296..4b9ec7d0b727 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -35,10 +35,6 @@ static const struct igc_reg_info igc_reg_info_tbl[] = {
{IGC_TDH(0), "TDH"},
{IGC_TDT(0), "TDT"},
{IGC_TXDCTL(0), "TXDCTL"},
- {IGC_TDFH, "TDFH"},
- {IGC_TDFT, "TDFT"},
- {IGC_TDFHS, "TDFHS"},
- {IGC_TDFPC, "TDFPC"},
/* List Terminator */
{}
@@ -47,6 +43,7 @@ static const struct igc_reg_info igc_reg_info_tbl[] = {
/* igc_regdump - register printout routine */
static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
{
+ struct net_device *dev = igc_get_hw_dev(hw);
int n = 0;
char rname[16];
u32 regs[8];
@@ -101,13 +98,14 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
regs[n] = rd32(IGC_TXDCTL(n));
break;
default:
- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ netdev_info(dev, "%-15s %08x\n", reginfo->name,
+ rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
- regs[2], regs[3]);
+ netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/* igc_rings_dump - Tx-rings and Rx-rings */
@@ -125,39 +123,34 @@ void igc_rings_dump(struct igc_adapter *adapter)
if (!netif_msg_hw(adapter))
return;
- /* Print netdevice Info */
- if (netdev) {
- dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start\n");
- pr_info("%-15s %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev));
- }
+ netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n",
+ netdev->state, dev_trans_start(netdev));
/* Print TX Ring Summary */
- if (!netdev || !netif_running(netdev))
+ if (!netif_running(netdev))
goto exit;
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ netdev_info(netdev, "TX Rings Summary\n");
+ netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igc_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+ netdev_info(netdev, "TX Rings Dump\n");
/* Transmit Descriptor Formats
*
@@ -172,10 +165,11 @@ void igc_rings_dump(struct igc_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "TX QUEUE INDEX = %d\n",
+ tx_ring->queue_index);
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -194,14 +188,14 @@ void igc_rings_dump(struct igc_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
- i, le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp,
- buffer_info->skb, next_desc);
+ netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
@@ -214,19 +208,19 @@ void igc_rings_dump(struct igc_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- pr_info("Queue [NTU] [NTC]\n");
+ netdev_info(netdev, "RX Rings Summary\n");
+ netdev_info(netdev, "Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- pr_info(" %5d %5X %5X\n",
- n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use,
+ rx_ring->next_to_clean);
}
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ netdev_info(netdev, "RX Rings Dump\n");
/* Advanced Receive Descriptor (Read) Format
* 63 1 0
@@ -251,11 +245,12 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "RX QUEUE INDEX = %d\n",
+ rx_ring->queue_index);
+ netdev_info(netdev, "------------------------------------\n");
+ netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -275,18 +270,18 @@ rx_ring_summary:
if (staterr & IGC_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
- "RWB", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- next_desc);
+ netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
- "R ", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
- next_desc);
+ netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ next_desc);
if (netif_msg_pktdata(adapter) &&
buffer_info->dma && buffer_info->page) {
@@ -314,8 +309,8 @@ void igc_regs_dump(struct igc_adapter *adapter)
struct igc_reg_info *reginfo;
/* Print Registers */
- dev_info(&adapter->pdev->dev, "Register Dump\n");
- pr_info(" Register Name Value\n");
+ netdev_info(adapter->netdev, "Register Dump\n");
+ netdev_info(adapter->netdev, "Register Name Value\n");
for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
reginfo->name; reginfo++) {
igc_regdump(hw, reginfo);
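The practical gain of the pr_info()/dev_info() to netdev_info() conversion is attribution: every dump line now carries the driver, bus address and interface name. Roughly (illustrative output, not captured from hardware):

netdev_info(netdev, "Register Dump\n");
/* log: "igc 0000:03:00.0 eth0: Register Dump" (exact prefix may vary) */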
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f530fc29b074..a938ec8db681 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -6,6 +6,7 @@
#include <linux/pm_runtime.h>
#include "igc.h"
+#include "igc_diag.h"
/* forward declaration */
struct igc_stats {
@@ -123,8 +124,8 @@ static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
-static void igc_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+static void igc_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -138,13 +139,13 @@ static void igc_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
}
-static int igc_get_regs_len(struct net_device *netdev)
+static int igc_ethtool_get_regs_len(struct net_device *netdev)
{
return IGC_REGS_LEN * sizeof(u32);
}
-static void igc_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+static void igc_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -153,7 +154,7 @@ static void igc_get_regs(struct net_device *netdev,
memset(p, 0, IGC_REGS_LEN * sizeof(u32));
- regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(IGC_CTRL);
@@ -306,9 +307,24 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[164 + i] = rd32(IGC_TDT(i));
for (i = 0; i < 4; i++)
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+
+ /* XXX: Due to a bug a few lines above, RAL and RAH registers are
+ * overwritten. To preserve the ABI, we write these registers again in
+ * regs_buff.
+ */
+ for (i = 0; i < 16; i++)
+ regs_buff[172 + i] = rd32(IGC_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[188 + i] = rd32(IGC_RAH(i));
+
+ regs_buff[204] = rd32(IGC_VLANPQF);
+
+ for (i = 0; i < 8; i++)
+ regs_buff[205 + i] = rd32(IGC_ETQF(i));
}
-static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void igc_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -339,7 +355,8 @@ static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->wolopts |= WAKE_PHY;
}
-static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int igc_ethtool_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -367,21 +384,21 @@ static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static u32 igc_get_msglevel(struct net_device *netdev)
+static u32 igc_ethtool_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
-static void igc_set_msglevel(struct net_device *netdev, u32 data)
+static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
-static int igc_nway_reset(struct net_device *netdev)
+static int igc_ethtool_nway_reset(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -390,7 +407,7 @@ static int igc_nway_reset(struct net_device *netdev)
return 0;
}
-static u32 igc_get_link(struct net_device *netdev)
+static u32 igc_ethtool_get_link(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_mac_info *mac = &adapter->hw.mac;
@@ -407,15 +424,15 @@ static u32 igc_get_link(struct net_device *netdev)
return igc_has_link(adapter);
}
-static int igc_get_eeprom_len(struct net_device *netdev)
+static int igc_ethtool_get_eeprom_len(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
return adapter->hw.nvm.word_size * 2;
}
-static int igc_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int igc_ethtool_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -461,8 +478,8 @@ static int igc_get_eeprom(struct net_device *netdev,
return ret_val;
}
-static int igc_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static int igc_ethtool_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -529,8 +546,8 @@ static int igc_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -540,8 +557,8 @@ static void igc_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
@@ -655,8 +672,8 @@ clear_reset:
return err;
}
-static void igc_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static void igc_ethtool_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -674,8 +691,8 @@ static void igc_get_pauseparam(struct net_device *netdev,
}
}
-static int igc_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+static int igc_ethtool_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -714,7 +731,8 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
-static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u8 *p = data;
@@ -765,7 +783,7 @@ static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
-static int igc_get_sset_count(struct net_device *netdev, int sset)
+static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
@@ -779,7 +797,7 @@ static int igc_get_sset_count(struct net_device *netdev, int sset)
}
}
-static void igc_get_ethtool_stats(struct net_device *netdev,
+static void igc_ethtool_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -835,8 +853,8 @@ static void igc_get_ethtool_stats(struct net_device *netdev,
spin_unlock(&adapter->stats64_lock);
}
-static int igc_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int igc_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -855,8 +873,8 @@ static int igc_get_coalesce(struct net_device *netdev,
return 0;
}
-static int igc_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int igc_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
@@ -913,81 +931,83 @@ static int igc_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
-static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
- struct igc_nfc_filter *rule = NULL;
+ struct igc_nfc_rule *rule = NULL;
- /* report total rule count */
- cmd->data = IGC_MAX_RXNFC_FILTERS;
+ cmd->data = IGC_MAX_RXNFC_RULES;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (fsp->location <= rule->sw_idx)
- break;
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (!rule)
+ goto out;
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
- if (!rule || fsp->location != rule->sw_idx)
- return -EINVAL;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
- if (rule->filter.match_flags) {
- fsp->flow_type = ETHER_FLOW;
- fsp->ring_cookie = rule->action;
- if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
- fsp->h_u.ether_spec.h_proto = rule->filter.etype;
- fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
- fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
- ether_addr_copy(fsp->h_u.ether_spec.h_dest,
- rule->filter.dst_addr);
- /* As we only support matching by the full
- * mask, return the mask to userspace
- */
- eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
- }
- if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
- ether_addr_copy(fsp->h_u.ether_spec.h_source,
- rule->filter.src_addr);
- /* As we only support matching by the full
- * mask, return the mask to userspace
- */
- eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
- }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
- return 0;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
}
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return 0;
+
+out:
+ mutex_unlock(&adapter->nfc_rule_lock);
return -EINVAL;
}
-static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
{
- struct igc_nfc_filter *rule;
+ struct igc_nfc_rule *rule;
int cnt = 0;
- /* report total rule count */
- cmd->data = IGC_MAX_RXNFC_FILTERS;
+ cmd->data = IGC_MAX_RXNFC_RULES;
+
+ mutex_lock(&adapter->nfc_rule_lock);
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (cnt == cmd->rule_cnt)
+ list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ mutex_unlock(&adapter->nfc_rule_lock);
return -EMSGSIZE;
- rule_locs[cnt] = rule->sw_idx;
+ }
+ rule_locs[cnt] = rule->location;
cnt++;
}
+ mutex_unlock(&adapter->nfc_rule_lock);
+
cmd->rule_cnt = cnt;
return 0;
}
-static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
@@ -1036,41 +1056,33 @@ static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
return 0;
}
-static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int igc_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
+ return 0;
case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = adapter->nfc_filter_count;
- ret = 0;
- break;
+ cmd->rule_cnt = adapter->nfc_rule_count;
+ return 0;
case ETHTOOL_GRXCLSRULE:
- ret = igc_get_ethtool_nfc_entry(adapter, cmd);
- break;
+ return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
- ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
- break;
+ return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
case ETHTOOL_GRXFH:
- ret = igc_get_rss_hash_opts(adapter, cmd);
- break;
+ return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
- break;
+ return -EOPNOTSUPP;
}
-
- return ret;
}
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
{
u32 flags = adapter->flags;
@@ -1145,8 +1157,8 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
if ((flags & UDP_RSS_FLAGS) &&
!(adapter->flags & UDP_RSS_FLAGS))
- dev_err(&adapter->pdev->dev,
- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+ netdev_err(adapter->netdev,
+ "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags = flags;
@@ -1171,344 +1183,184 @@ static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
return 0;
}
-static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input)
+static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
{
- struct igc_hw *hw = &adapter->hw;
- u8 i;
- u32 etqf;
- u16 etype;
-
- /* find an empty etype filter register */
- for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
- if (!adapter->etype_bitmap[i])
- break;
- }
- if (i == MAX_ETYPE_FILTER) {
- dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
- return -EINVAL;
- }
+ INIT_LIST_HEAD(&rule->list);
- adapter->etype_bitmap[i] = true;
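+	/* ring_cookie carries the target Rx queue; the caller has already validated it */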
+ rule->action = fsp->ring_cookie;
+ rule->location = fsp->location;
- etqf = rd32(IGC_ETQF(i));
- etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
-
- etqf |= IGC_ETQF_FILTER_ENABLE;
- etqf &= ~IGC_ETQF_ETYPE_MASK;
- etqf |= (etype & IGC_ETQF_ETYPE_MASK);
-
- etqf &= ~IGC_ETQF_QUEUE_MASK;
- etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
- & IGC_ETQF_QUEUE_MASK);
- etqf |= IGC_ETQF_QUEUE_ENABLE;
-
- wr32(IGC_ETQF(i), etqf);
-
- input->etype_reg_index = i;
-
- return 0;
-}
-
-static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
- struct igc_nfc_filter *input)
-{
- struct igc_hw *hw = &adapter->hw;
- u8 vlan_priority;
- u16 queue_index;
- u32 vlapqf;
-
- vlapqf = rd32(IGC_VLAPQF);
- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
- >> VLAN_PRIO_SHIFT;
- queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
-
- /* check whether this vlan prio is already set */
- if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
- queue_index != input->action) {
- dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
- return -EEXIST;
- }
-
- vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
- vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
-
- wr32(IGC_VLAPQF, vlapqf);
-
- return 0;
-}
-
-int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
-{
- struct igc_hw *hw = &adapter->hw;
- int err = -EINVAL;
-
- if (hw->mac.type == igc_i225 &&
- !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
- dev_err(&adapter->pdev->dev,
- "i225 doesn't support flow classification rules specifying only source addresses.\n");
- return -EOPNOTSUPP;
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
+ rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
- err = igc_rxnfc_write_etype_filter(adapter, input);
- if (err)
- return err;
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto);
+		rule->filter.match_flags |= IGC_FILTER_FLAG_ETHER_TYPE;
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.dst_addr,
- input->action, 0);
- err = min_t(int, err, 0);
- if (err)
- return err;
+ /* Both source and destination address filters only support the full
+ * mask.
+ */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(rule->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
}
- if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
- err = igc_add_mac_steering_filter(adapter,
- input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
- err = min_t(int, err, 0);
- if (err)
- return err;
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(rule->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
}
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
- err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
-
- return err;
-}
-
-static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
- u16 reg_index)
-{
- struct igc_hw *hw = &adapter->hw;
- u32 etqf = rd32(IGC_ETQF(reg_index));
-
- etqf &= ~IGC_ETQF_QUEUE_ENABLE;
- etqf &= ~IGC_ETQF_QUEUE_MASK;
- etqf &= ~IGC_ETQF_FILTER_ENABLE;
-
- wr32(IGC_ETQF(reg_index), etqf);
-
- adapter->etype_bitmap[reg_index] = false;
-}
-
-static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
- u16 vlan_tci)
-{
- struct igc_hw *hw = &adapter->hw;
- u8 vlan_priority;
- u32 vlapqf;
-
- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- vlapqf = rd32(IGC_VLAPQF);
- vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
- vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
- IGC_VLAPQF_QUEUE_MASK);
-
- wr32(IGC_VLAPQF, vlapqf);
}
-int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+/**
+ * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid
+ * @adapter: Pointer to adapter
+ * @rule: Rule under evaluation
+ *
+ * The driver doesn't support rules with multiple matches, so if more than
+ * one bit in filter flags is set, @rule is considered invalid.
+ *
+ * Also, if there is already another rule with the same filter in a different
+ * location, @rule is considered invalid.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
+ */
+static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter,
+ struct igc_nfc_rule *rule)
{
- if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
- igc_clear_etype_filter_regs(adapter,
- input->etype_reg_index);
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
- igc_clear_vlan_prio_filter(adapter,
- ntohs(input->filter.vlan_tci));
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.src_addr,
- input->action,
- IGC_MAC_STATE_SRC_ADDR);
-
- if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
- igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
- input->action, 0);
-
- return 0;
-}
-
-static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct igc_nfc_filter *input,
- u16 sw_idx)
-{
- struct igc_nfc_filter *rule, *parent;
- int err = -EINVAL;
-
- parent = NULL;
- rule = NULL;
+ struct net_device *dev = adapter->netdev;
+ u8 flags = rule->filter.match_flags;
+ struct igc_nfc_rule *tmp;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- /* hash found, or no matching entry */
- if (rule->sw_idx >= sw_idx)
- break;
- parent = rule;
+ if (!flags) {
+ netdev_dbg(dev, "Rule with no match\n");
+ return -EINVAL;
}
- /* if there is an old rule occupying our place remove it */
- if (rule && rule->sw_idx == sw_idx) {
- if (!input)
- err = igc_erase_filter(adapter, rule);
-
- hlist_del(&rule->nfc_node);
- kfree(rule);
- adapter->nfc_filter_count--;
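+	/* flags & (flags - 1) clears the lowest set bit; non-zero means more than one match */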
+ if (flags & (flags - 1)) {
+ netdev_dbg(dev, "Rule with multiple matches not supported\n");
+ return -EOPNOTSUPP;
}
- /* If no input this was a delete, err should be 0 if a rule was
- * successfully found and removed from the list else -EINVAL
- */
- if (!input)
- return err;
-
- /* initialize node */
- INIT_HLIST_NODE(&input->nfc_node);
-
- /* add filter to the list */
- if (parent)
- hlist_add_behind(&input->nfc_node, &parent->nfc_node);
- else
- hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
-
- /* update counts */
- adapter->nfc_filter_count++;
+ list_for_each_entry(tmp, &adapter->nfc_rule_list, list) {
+ if (!memcmp(&rule->filter, &tmp->filter,
+ sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "Rule already exists\n");
+ return -EEXIST;
+ }
+ }
return 0;
}
-static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- struct igc_nfc_filter *input, *rule;
- int err = 0;
+ struct igc_nfc_rule *rule, *old_rule;
+ int err;
- if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ if (!(netdev->hw_features & NETIF_F_NTUPLE)) {
+ netdev_dbg(netdev, "N-tuple filters disabled\n");
return -EOPNOTSUPP;
+ }
- /* Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
- */
- if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
- fsp->ring_cookie >= adapter->num_rx_queues) {
- dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
- return -EINVAL;
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) {
+ netdev_dbg(netdev, "Only ethernet flow type is supported\n");
+ return -EOPNOTSUPP;
}
- /* Don't allow indexes to exist outside of available space */
- if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
- dev_err(&adapter->pdev->dev, "Location out of range\n");
- return -EINVAL;
+ if ((fsp->flow_type & FLOW_EXT) &&
+ fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ netdev_dbg(netdev, "VLAN mask not supported\n");
+ return -EOPNOTSUPP;
}
- if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
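+	/* RX_CLS_FLOW_DISC is ~0ULL, so drop actions also fail this bound check */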
+ if (fsp->ring_cookie >= adapter->num_rx_queues) {
+ netdev_dbg(netdev, "Invalid action\n");
return -EINVAL;
-
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
-
- if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
- input->filter.etype = fsp->h_u.ether_spec.h_proto;
- input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
}
- /* Only support matching addresses by the full mask */
- if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
- input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
- ether_addr_copy(input->filter.src_addr,
- fsp->h_u.ether_spec.h_source);
+ if (fsp->location >= IGC_MAX_RXNFC_RULES) {
+ netdev_dbg(netdev, "Invalid location\n");
+ return -EINVAL;
}
- /* Only support matching addresses by the full mask */
- if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
- input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
- ether_addr_copy(input->filter.dst_addr,
- fsp->h_u.ether_spec.h_dest);
- }
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
- if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
- if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
- err = -EINVAL;
- goto err_out;
- }
- input->filter.vlan_tci = fsp->h_ext.vlan_tci;
- input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
- }
+ igc_ethtool_init_nfc_rule(rule, fsp);
- input->action = fsp->ring_cookie;
- input->sw_idx = fsp->location;
+ mutex_lock(&adapter->nfc_rule_lock);
- spin_lock(&adapter->nfc_lock);
+ err = igc_ethtool_check_nfc_rule(adapter, rule);
+ if (err)
+ goto err;
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
- if (!memcmp(&input->filter, &rule->filter,
- sizeof(input->filter))) {
- err = -EEXIST;
- dev_err(&adapter->pdev->dev,
- "ethtool: this filter is already set\n");
- goto err_out_w_lock;
- }
- }
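+	/* an ethtool insert at an occupied location replaces the existing rule */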
+ old_rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (old_rule)
+ igc_del_nfc_rule(adapter, old_rule);
- err = igc_add_filter(adapter, input);
+ err = igc_add_nfc_rule(adapter, rule);
if (err)
- goto err_out_w_lock;
-
- igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ goto err;
- spin_unlock(&adapter->nfc_lock);
+ mutex_unlock(&adapter->nfc_rule_lock);
return 0;
-err_out_w_lock:
- spin_unlock(&adapter->nfc_lock);
-err_out:
- kfree(input);
+err:
+ mutex_unlock(&adapter->nfc_rule_lock);
+ kfree(rule);
return err;
}
-static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
- int err;
+ struct igc_nfc_rule *rule;
- spin_lock(&adapter->nfc_lock);
- err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
- spin_unlock(&adapter->nfc_lock);
+ mutex_lock(&adapter->nfc_rule_lock);
- return err;
+ rule = igc_get_nfc_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return -EINVAL;
+ }
+
+ igc_del_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+ return 0;
}
-static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static int igc_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
{
struct igc_adapter *adapter = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
- ret = igc_set_rss_hash_opt(adapter, cmd);
- break;
+ return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
- ret = igc_add_ethtool_nfc_entry(adapter, cmd);
- break;
+ return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
- ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+ return igc_ethtool_del_nfc_rule(adapter, cmd);
default:
- break;
+ return -EOPNOTSUPP;
}
-
- return ret;
}
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
@@ -1533,13 +1385,13 @@ void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
}
}
-static u32 igc_get_rxfh_indir_size(struct net_device *netdev)
+static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
{
return IGC_RETA_SIZE;
}
-static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
@@ -1554,8 +1406,8 @@ static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
@@ -1583,18 +1435,13 @@ static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
-static unsigned int igc_max_channels(struct igc_adapter *adapter)
-{
- return igc_get_max_rss_queues(adapter);
-}
-
-static void igc_get_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
+static void igc_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
{
struct igc_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = igc_max_channels(adapter);
+ ch->max_combined = igc_get_max_rss_queues(adapter);
/* Report info for other vector */
if (adapter->flags & IGC_FLAG_HAS_MSIX) {
@@ -1605,8 +1452,8 @@ static void igc_get_channels(struct net_device *netdev,
ch->combined_count = adapter->rss_queues;
}
-static int igc_set_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
+static int igc_ethtool_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
{
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int count = ch->combined_count;
@@ -1621,7 +1468,7 @@ static int igc_set_channels(struct net_device *netdev,
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
- max_combined = igc_max_channels(adapter);
+ max_combined = igc_get_max_rss_queues(adapter);
if (count > max_combined)
return -EINVAL;
@@ -1638,8 +1485,8 @@ static int igc_set_channels(struct net_device *netdev,
return 0;
}
-static int igc_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+static int igc_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
@@ -1671,7 +1518,7 @@ static int igc_get_ts_info(struct net_device *dev,
}
}
-static u32 igc_get_priv_flags(struct net_device *netdev)
+static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 priv_flags = 0;
@@ -1682,7 +1529,7 @@ static u32 igc_get_priv_flags(struct net_device *netdev)
return priv_flags;
}
-static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
@@ -1717,8 +1564,8 @@ static void igc_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
-static int igc_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
+static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
@@ -1824,10 +1671,12 @@ static int igc_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int igc_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
+static int
+igc_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
u32 advertising;
@@ -1835,8 +1684,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
* cannot be changed
*/
if (igc_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev,
- "Cannot change link characteristics when reset is active.\n");
+ netdev_err(dev, "Cannot change link characteristics when reset is active\n");
return -EINVAL;
}
@@ -1847,7 +1695,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
if (cmd->base.eth_tp_mdix_ctrl) {
if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
cmd->base.autoneg != AUTONEG_ENABLE) {
- dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
return -EINVAL;
}
}
@@ -1864,9 +1712,7 @@ static int igc_set_link_ksettings(struct net_device *netdev,
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
- /* calling this overrides forced MDI setting */
- dev_info(&adapter->pdev->dev,
- "Force mode currently not supported\n");
+ netdev_info(dev, "Force mode currently not supported\n");
}
/* MDI-X => 2; MDI => 1; Auto => 3 */
@@ -1893,46 +1739,105 @@ static int igc_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void igc_ethtool_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ netdev_info(adapter->netdev, "Offline testing starting");
+ set_bit(__IGC_TESTING, &adapter->state);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (!igc_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ igc_close(netdev);
+ else
+ igc_reset(adapter);
+
+ netdev_info(adapter->netdev, "Register testing starting");
+ if (!igc_reg_test(adapter, &data[TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igc_reset(adapter);
+
+ netdev_info(adapter->netdev, "EEPROM testing starting");
+ if (!igc_eeprom_test(adapter, &data[TEST_EEP]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igc_reset(adapter);
+
+ /* loopback and interrupt tests
+ * will be implemented in the future
+ */
+ data[TEST_LOOP] = 0;
+ data[TEST_IRQ] = 0;
+
+ clear_bit(__IGC_TESTING, &adapter->state);
+ if (if_running)
+ igc_open(netdev);
+ } else {
+ netdev_info(adapter->netdev, "Online testing starting");
+
+ /* register, eeprom, intr and loopback tests not run online */
+ data[TEST_REG] = 0;
+ data[TEST_EEP] = 0;
+ data[TEST_IRQ] = 0;
+ data[TEST_LOOP] = 0;
+
+ if (!igc_link_test(adapter, &data[TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ msleep_interruptible(4 * 1000);
+}
+
static const struct ethtool_ops igc_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .get_drvinfo = igc_get_drvinfo,
- .get_regs_len = igc_get_regs_len,
- .get_regs = igc_get_regs,
- .get_wol = igc_get_wol,
- .set_wol = igc_set_wol,
- .get_msglevel = igc_get_msglevel,
- .set_msglevel = igc_set_msglevel,
- .nway_reset = igc_nway_reset,
- .get_link = igc_get_link,
- .get_eeprom_len = igc_get_eeprom_len,
- .get_eeprom = igc_get_eeprom,
- .set_eeprom = igc_set_eeprom,
- .get_ringparam = igc_get_ringparam,
- .set_ringparam = igc_set_ringparam,
- .get_pauseparam = igc_get_pauseparam,
- .set_pauseparam = igc_set_pauseparam,
- .get_strings = igc_get_strings,
- .get_sset_count = igc_get_sset_count,
- .get_ethtool_stats = igc_get_ethtool_stats,
- .get_coalesce = igc_get_coalesce,
- .set_coalesce = igc_set_coalesce,
- .get_rxnfc = igc_get_rxnfc,
- .set_rxnfc = igc_set_rxnfc,
- .get_rxfh_indir_size = igc_get_rxfh_indir_size,
- .get_rxfh = igc_get_rxfh,
- .set_rxfh = igc_set_rxfh,
- .get_ts_info = igc_get_ts_info,
- .get_channels = igc_get_channels,
- .set_channels = igc_set_channels,
- .get_priv_flags = igc_get_priv_flags,
- .set_priv_flags = igc_set_priv_flags,
+ .get_drvinfo = igc_ethtool_get_drvinfo,
+ .get_regs_len = igc_ethtool_get_regs_len,
+ .get_regs = igc_ethtool_get_regs,
+ .get_wol = igc_ethtool_get_wol,
+ .set_wol = igc_ethtool_set_wol,
+ .get_msglevel = igc_ethtool_get_msglevel,
+ .set_msglevel = igc_ethtool_set_msglevel,
+ .nway_reset = igc_ethtool_nway_reset,
+ .get_link = igc_ethtool_get_link,
+ .get_eeprom_len = igc_ethtool_get_eeprom_len,
+ .get_eeprom = igc_ethtool_get_eeprom,
+ .set_eeprom = igc_ethtool_set_eeprom,
+ .get_ringparam = igc_ethtool_get_ringparam,
+ .set_ringparam = igc_ethtool_set_ringparam,
+ .get_pauseparam = igc_ethtool_get_pauseparam,
+ .set_pauseparam = igc_ethtool_set_pauseparam,
+ .get_strings = igc_ethtool_get_strings,
+ .get_sset_count = igc_ethtool_get_sset_count,
+ .get_ethtool_stats = igc_ethtool_get_stats,
+ .get_coalesce = igc_ethtool_get_coalesce,
+ .set_coalesce = igc_ethtool_set_coalesce,
+ .get_rxnfc = igc_ethtool_get_rxnfc,
+ .set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
+ .get_rxfh = igc_ethtool_get_rxfh,
+ .set_rxfh = igc_ethtool_set_rxfh,
+ .get_ts_info = igc_ethtool_get_ts_info,
+ .get_channels = igc_ethtool_get_channels,
+ .set_channels = igc_ethtool_set_channels,
+ .get_priv_flags = igc_ethtool_get_priv_flags,
+ .set_priv_flags = igc_ethtool_set_priv_flags,
.begin = igc_ethtool_begin,
.complete = igc_ethtool_complete,
- .get_link_ksettings = igc_get_link_ksettings,
- .set_link_ksettings = igc_set_link_ksettings,
+ .get_link_ksettings = igc_ethtool_get_link_ksettings,
+ .set_link_ksettings = igc_ethtool_set_link_ksettings,
+ .self_test = igc_ethtool_diag_test,
};
-void igc_set_ethtool_ops(struct net_device *netdev)
+void igc_ethtool_set_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &igc_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 90ac0e0144d8..af34ae310327 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -21,6 +21,9 @@
#define IGC_DEV_ID_I225_I 0x15F8
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 12aa6b5fcb5d..410aeb01de5c 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -235,15 +235,14 @@ out:
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
rd32(IGC_CRCERRS);
- rd32(IGC_SYMERRS);
rd32(IGC_MPC);
rd32(IGC_SCC);
rd32(IGC_ECOL);
rd32(IGC_MCC);
rd32(IGC_LATECOL);
rd32(IGC_COLC);
+ rd32(IGC_RERC);
rd32(IGC_DC);
- rd32(IGC_SEC);
rd32(IGC_RLEC);
rd32(IGC_XONRXC);
rd32(IGC_XONTXC);
@@ -288,7 +287,7 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_ALGNERRC);
rd32(IGC_RXERRC);
rd32(IGC_TNCRS);
- rd32(IGC_CEXTERR);
+ rd32(IGC_HTDPMC);
rd32(IGC_TSCTC);
rd32(IGC_TSCTFC);
@@ -307,12 +306,8 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
rd32(IGC_ICTXQMTC);
rd32(IGC_ICRXDMTC);
- rd32(IGC_CBTMPC);
- rd32(IGC_HTDPMC);
- rd32(IGC_CBRMPC);
rd32(IGC_RPTHC);
rd32(IGC_HGPTC);
- rd32(IGC_HTCBDPC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
index 832cccec87cd..b5963f86defb 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.h
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -8,10 +8,6 @@
#include "igc_phy.h"
#include "igc_defines.h"
-#ifndef IGC_REMOVED
-#define IGC_REMOVED(a) (0)
-#endif /* IGC_REMOVED */
-
/* forward declaration */
s32 igc_disable_pcie_master(struct igc_hw *hw);
s32 igc_check_for_copper_link(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 59fc0097438f..6919c50e449a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -9,11 +9,13 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
+#include <net/pkt_sched.h>
#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
+#include "igc_tsn.h"
#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
@@ -45,6 +47,9 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -71,7 +76,7 @@ static void igc_power_down_link(struct igc_adapter *adapter)
void igc_reset(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
struct igc_fc_info *fc = &hw->fc;
u32 pba, hwm;
@@ -98,7 +103,7 @@ void igc_reset(struct igc_adapter *adapter)
hw->mac.ops.reset_hw(hw);
if (hw->mac.ops.init_hw(hw))
- dev_err(&pdev->dev, "Hardware Error\n");
+ netdev_err(dev, "Error on hardware initialization\n");
if (!netif_running(adapter->netdev))
igc_power_down_link(adapter);
@@ -106,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
+ /* Re-enable TSN offloading, where applicable. */
+ igc_tsn_offload_apply(adapter);
+
igc_get_phy_info(hw);
}
@@ -280,6 +288,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
*/
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
+ struct net_device *ndev = tx_ring->netdev;
struct device *dev = tx_ring->dev;
int size = 0;
@@ -305,8 +314,7 @@ int igc_setup_tx_resources(struct igc_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
- dev_err(dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
return -ENOMEM;
}
@@ -318,14 +326,13 @@ err:
*/
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
err = igc_setup_tx_resources(adapter->tx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
- "Allocation for Tx Queue %u failed\n", i);
+ netdev_err(dev, "Error on Tx queue %u setup\n", i);
for (i--; i >= 0; i--)
igc_free_tx_resources(adapter->tx_ring[i]);
break;
@@ -436,6 +443,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter)
*/
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
+ struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
int size, desc_len;
@@ -465,8 +473,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dev_err(dev,
- "Unable to allocate memory for the receive descriptor ring\n");
+ netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
return -ENOMEM;
}
@@ -479,14 +486,13 @@ err:
*/
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = igc_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
- "Allocation for Rx Queue %u failed\n", i);
+ netdev_err(dev, "Error on Rx queue %u setup\n", i);
for (i--; i >= 0; i--)
igc_free_rx_resources(adapter->rx_ring[i]);
break;
@@ -757,48 +763,76 @@ static void igc_setup_tctl(struct igc_adapter *adapter)
}
/**
- * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: address of board private structure
- * @index: Index of the RAR entry which need to be synced with MAC table
+ * igc_set_mac_filter_hw() - Set MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be set
+ * @index: Filter index
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
*/
-static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
+ enum igc_mac_filter_type type,
+ const u8 *addr, int queue)
{
- u8 *addr = adapter->mac_table[index].addr;
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 rar_low, rar_high;
+ u32 ral, rah;
- /* HW expects these to be in network order when they are plugged
- * into the registers which are little endian. In order to guarantee
- * that ordering we need to do an leXX_to_cpup here in order to be
- * ready for the byteswap that occurs with writel
- */
- rar_low = le32_to_cpup((__le32 *)(addr));
- rar_high = le16_to_cpup((__le16 *)(addr + 4));
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
- /* Indicate to hardware the Address is Valid. */
- if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
- if (is_valid_ether_addr(addr))
- rar_high |= IGC_RAH_AV;
+ ral = le32_to_cpup((__le32 *)(addr));
+ rah = le16_to_cpup((__le16 *)(addr + 4));
- rar_high |= IGC_RAH_POOL_1 <<
- adapter->mac_table[index].queue;
+ if (type == IGC_MAC_FILTER_TYPE_SRC) {
+ rah &= ~IGC_RAH_ASEL_MASK;
+ rah |= IGC_RAH_ASEL_SRC_ADDR;
}
- wr32(IGC_RAL(index), rar_low);
- wrfl();
- wr32(IGC_RAH(index), rar_high);
- wrfl();
+ if (queue >= 0) {
+ rah &= ~IGC_RAH_QSEL_MASK;
+ rah |= (queue << IGC_RAH_QSEL_SHIFT);
+ rah |= IGC_RAH_QSEL_ENABLE;
+ }
+
+ rah |= IGC_RAH_AV;
+
+ wr32(IGC_RAL(index), ral);
+ wr32(IGC_RAH(index), rah);
+
+ netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
+}
+
+/**
+ * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
+ * @adapter: Pointer to adapter where the filter should be cleared
+ * @index: Filter index
+ */
+static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
+{
+ struct net_device *dev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ if (WARN_ON(index >= hw->mac.rar_entry_count))
+ return;
+
+ wr32(IGC_RAL(index), 0);
+ wr32(IGC_RAH(index), 0);
+
+ netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}
/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
- struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+ struct net_device *dev = adapter->netdev;
+ u8 *addr = adapter->hw.mac.addr;
- ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
- mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
- igc_rar_set_index(adapter, 0);
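+	/* queue == -1: leave queue assignment disabled for the default filter */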
+ igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
/**
@@ -864,6 +898,23 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
+static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+{
+ ktime_t cycle_time = adapter->cycle_time;
+ ktime_t base_time = adapter->base_time;
+ u32 launchtime;
+
+ /* FIXME: when using ETF together with taprio, we may have a
+	 * case where the delta (txtime - base_time) is larger than the
+	 * cycle_time; this may cause problems if we don't read the
+	 * current value of IGC_BASET, as the value written into the
+	 * launchtime descriptor field may be misinterpreted.
+ */
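+	/* launchtime is txtime's offset within the current cycle */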
+ div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+
+ return cpu_to_le32(launchtime);
+}
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -871,7 +922,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
{
struct igc_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
- struct timespec64 ts;
context_desc = IGC_TX_CTXTDESC(tx_ring, i);
@@ -893,9 +943,12 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
* should have been handled by the upper layers.
*/
if (tx_ring->launchtime_enable) {
- ts = ktime_to_timespec64(first->skb->tstamp);
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ ktime_t txtime = first->skb->tstamp;
+
first->skb->tstamp = ktime_set(0, 0);
- context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+ context_desc->launch_time = igc_tx_launchtime(adapter,
+ txtime);
} else {
context_desc->launch_time = 0;
}
@@ -1143,7 +1196,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
return 0;
dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ netdev_err(tx_ring->netdev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
@@ -1406,8 +1459,8 @@ static void igc_rx_checksum(struct igc_ring *ring,
IGC_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(ring->dev, "cksum success: bits %08X\n",
- le32_to_cpu(rx_desc->wb.upper.status_error));
+ netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
}
static inline void igc_rx_hash(struct igc_ring *ring,
@@ -2069,27 +2122,27 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(tx_ring->dev,
- "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%p>\n"
- " jiffies <%lx>\n"
- " desc.status <%x>\n",
- tx_ring->queue_index,
- rd32(IGC_TDH(tx_ring->reg_idx)),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_buffer->time_stamp,
- tx_buffer->next_to_watch,
- jiffies,
- tx_buffer->next_to_watch->wb.status);
+ netdev_err(tx_ring->netdev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ rd32(IGC_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
@@ -2121,140 +2174,435 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
return !!budget;
}
-static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+static int igc_find_mac_filter(struct igc_adapter *adapter,
+ enum igc_mac_filter_type type, const u8 *addr)
{
- struct igc_nfc_filter *rule;
+ struct igc_hw *hw = &adapter->hw;
+ int max_entries = hw->mac.rar_entry_count;
+ u32 ral, rah;
+ int i;
- spin_lock(&adapter->nfc_lock);
+ for (i = 0; i < max_entries; i++) {
+ ral = rd32(IGC_RAL(i));
+ rah = rd32(IGC_RAH(i));
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_add_filter(adapter, rule);
+ if (!(rah & IGC_RAH_AV))
+ continue;
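+		/* match the filter type; assumes IGC_MAC_FILTER_TYPE_DST == 0 and _SRC == 1 */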
+ if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
+ continue;
+ if ((rah & IGC_RAH_RAH_MASK) !=
+ le16_to_cpup((__le16 *)(addr + 4)))
+ continue;
+ if (ral != le32_to_cpup((__le32 *)(addr)))
+ continue;
+
+ return i;
+ }
- spin_unlock(&adapter->nfc_lock);
+ return -1;
}
-/* If the filter to be added and an already existing filter express
- * the same address and address type, it should be possible to only
- * override the other configurations, for example the queue to steer
- * traffic.
- */
-static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
- const u8 *addr, const u8 flags)
+static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
- if (!(entry->state & IGC_MAC_STATE_IN_USE))
- return true;
+ struct igc_hw *hw = &adapter->hw;
+ int max_entries = hw->mac.rar_entry_count;
+ u32 rah;
+ int i;
- if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
- (flags & IGC_MAC_STATE_SRC_ADDR))
- return false;
+ for (i = 0; i < max_entries; i++) {
+ rah = rd32(IGC_RAH(i));
- if (!ether_addr_equal(addr, entry->addr))
- return false;
+ if (!(rah & IGC_RAH_AV))
+ return i;
+ }
- return true;
+ return -1;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
+/**
+ * igc_add_mac_filter() - Add MAC address filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
static int igc_add_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+ enum igc_mac_filter_type type, const u8 *addr,
+ int queue)
+{
+ struct net_device *dev = adapter->netdev;
+ int index;
+
+ index = igc_find_mac_filter(adapter, type, addr);
+ if (index >= 0)
+ goto update_filter;
+
+ index = igc_get_avail_mac_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
+ index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
+ addr, queue);
+
+update_filter:
+ igc_set_mac_filter_hw(adapter, index, type, addr, queue);
+ return 0;
+}
+
+/**
+ * igc_del_mac_filter() - Delete MAC address filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @type: MAC address filter type (source or destination)
+ * @addr: MAC address
+ */
+static void igc_del_mac_filter(struct igc_adapter *adapter,
+ enum igc_mac_filter_type type, const u8 *addr)
{
+ struct net_device *dev = adapter->netdev;
+ int index;
+
+ index = igc_find_mac_filter(adapter, type, addr);
+ if (index < 0)
+ return;
+
+ if (index == 0) {
+ /* If this is the default filter, we don't actually delete it.
+		 * We just reset it to its default value, i.e. disable queue
+		 * assignment.
+ */
+ netdev_dbg(dev, "Disable default MAC filter queue assignment");
+
+ igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
+ } else {
+ netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
+ index,
+ type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
+ addr);
+
+ igc_clear_mac_filter_hw(adapter, index);
+ }
+}
+
+/**
+ * igc_add_vlan_prio_filter() - Add VLAN priority filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @prio: VLAN priority value
+ * @queue: Queue number which matching frames are assigned to
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
+ */
+static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
+ int queue)
+{
+ struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
+ u32 vlanpqf;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
+ vlanpqf = rd32(IGC_VLANPQF);
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, 0))
- continue;
+ if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
+ netdev_dbg(dev, "VLAN priority filter already in use\n");
+ return -EEXIST;
+ }
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+ vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
+ vlanpqf |= IGC_VLANPQF_VALID(prio);
- igc_rar_set_index(adapter, i);
- return i;
+ wr32(IGC_VLANPQF, vlanpqf);
+
+ netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
+ prio, queue);
+ return 0;
+}
+
+/**
+ * igc_del_vlan_prio_filter() - Delete VLAN priority filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @prio: VLAN priority value
+ */
+static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 vlanpqf;
+
+ vlanpqf = rd32(IGC_VLANPQF);
+
+ vlanpqf &= ~IGC_VLANPQF_VALID(prio);
+ vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
+
+ wr32(IGC_VLANPQF, vlanpqf);
+
+ netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
+ prio);
+}
+
+static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < MAX_ETYPE_FILTER; i++) {
+ u32 etqf = rd32(IGC_ETQF(i));
+
+ if (!(etqf & IGC_ETQF_FILTER_ENABLE))
+ return i;
}
- return -ENOSPC;
+ return -1;
}
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
+/**
+ * igc_add_etype_filter() - Add ethertype filter
+ * @adapter: Pointer to adapter where the filter should be added
+ * @etype: Ethertype value
+ * @queue: If non-negative, queue assignment feature is enabled and frames
+ * matching the filter are enqueued onto 'queue'. Otherwise, queue
+ * assignment is disabled.
+ *
+ * Return: 0 in case of success, negative errno code otherwise.
*/
-static int igc_del_mac_filter(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue)
+static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
+ int queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int index;
+ u32 etqf;
+
+ index = igc_get_avail_etype_filter_slot(adapter);
+ if (index < 0)
+ return -ENOSPC;
+
+ etqf = rd32(IGC_ETQF(index));
+
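+	/* the ethertype value occupies the bits covered by IGC_ETQF_ETYPE_MASK */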
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= etype;
+
+ if (queue >= 0) {
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+ }
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(index), etqf);
+
+ netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
+ etype, queue);
+ return 0;
+}
+
+static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
{
struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
int i;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
+ for (i = 0; i < MAX_ETYPE_FILTER; i++) {
+ u32 etqf = rd32(IGC_ETQF(i));
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if (adapter->mac_table[i].state != 0)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
+ if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
+ return i;
+ }
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- adapter->mac_table[i].queue = 0;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
+ return -1;
+}
- igc_rar_set_index(adapter, i);
- return 0;
+/**
+ * igc_del_etype_filter() - Delete ethertype filter
+ * @adapter: Pointer to adapter where the filter should be deleted from
+ * @etype: Ethertype value
+ */
+static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int index;
+
+ index = igc_find_etype_filter(adapter, etype);
+ if (index < 0)
+ return;
+
+ wr32(IGC_ETQF(index), 0);
+
+ netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
+ etype);
+}
+
+static int igc_enable_nfc_rule(struct igc_adapter *adapter,
+ const struct igc_nfc_rule *rule)
+{
+ int err;
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_add_etype_filter(adapter, rule->filter.etype,
+ rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
+ rule->filter.src_addr, rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
+ rule->filter.dst_addr, rule->action);
+ if (err)
+ return err;
+ }
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+
+ err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void igc_disable_nfc_rule(struct igc_adapter *adapter,
+ const struct igc_nfc_rule *rule)
+{
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_del_etype_filter(adapter, rule->filter.etype);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+
+ igc_del_vlan_prio_filter(adapter, prio);
}
- return -ENOENT;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
+ rule->filter.src_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
+ rule->filter.dst_addr);
+}
+
+/**
+ * igc_get_nfc_rule() - Get NFC rule
+ * @adapter: Pointer to adapter
+ * @location: Rule location
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: Pointer to NFC rule at @location. If not found, NULL.
+ */
+struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
+ u32 location)
+{
+ struct igc_nfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
+ if (rule->location == location)
+ return rule;
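+		/* rules are kept sorted by location, so the search can stop early */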
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * igc_del_nfc_rule() - Delete NFC rule
+ * @adapter: Pointer to adapter
+ * @rule: Pointer to rule to be deleted
+ *
+ * Disable NFC rule in hardware and delete it from adapter.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ */
+void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
+{
+ igc_disable_nfc_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->nfc_rule_count--;
+
+ kfree(rule);
+}
+
+static void igc_flush_nfc_rules(struct igc_adapter *adapter)
+{
+ struct igc_nfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
+ igc_del_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
+}
+
+/**
+ * igc_add_nfc_rule() - Add NFC rule
+ * @adapter: Pointer to adapter
+ * @rule: Pointer to rule to be added
+ *
+ * Enable NFC rule in hardware and add it to adapter.
+ *
+ * Context: Expects adapter->nfc_rule_lock to be held by caller.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
+{
+ struct igc_nfc_rule *pred, *cur;
+ int err;
+
+ err = igc_enable_nfc_rule(adapter, rule);
+ if (err)
+ return err;
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
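+	/* insert after the last rule with a smaller location to keep the list sorted */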
+ list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
+ adapter->nfc_rule_count++;
+ return 0;
+}
+
+static void igc_restore_nfc_rules(struct igc_adapter *adapter)
+{
+ struct igc_nfc_rule *rule;
+
+ mutex_lock(&adapter->nfc_rule_lock);
+
+ list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
+ igc_enable_nfc_rule(adapter, rule);
+
+ mutex_unlock(&adapter->nfc_rule_lock);
}
static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- int ret;
-
- ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
- return min_t(int, ret, 0);
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct igc_adapter *adapter = netdev_priv(netdev);
- igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
-
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
return 0;
}
@@ -2325,7 +2673,9 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
- igc_nfc_filter_restore(adapter);
+ igc_set_default_mac_filter(adapter);
+ igc_restore_nfc_rules(adapter);
+
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -2518,12 +2868,7 @@ void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
- unsigned int max_rss_queues;
-
- /* Determine the maximum number of RSS queues supported. */
- max_rss_queues = IGC_MAX_RX_QUEUES;
-
- return max_rss_queues;
+ return IGC_MAX_RX_QUEUES;
}
static void igc_init_queue_configuration(struct igc_adapter *adapter)
@@ -3164,14 +3509,14 @@ err_out:
*/
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
int err = 0;
igc_set_interrupt_capability(adapter, msix);
err = igc_alloc_q_vectors(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ netdev_err(dev, "Unable to allocate memory for vectors\n");
goto err_alloc_q_vectors;
}
@@ -3198,8 +3543,6 @@ static int igc_sw_init(struct igc_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
- int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
-
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
/* set default ring sizes */
@@ -3218,20 +3561,19 @@ static int igc_sw_init(struct igc_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- spin_lock_init(&adapter->nfc_lock);
+ mutex_init(&adapter->nfc_rule_lock);
+ INIT_LIST_HEAD(&adapter->nfc_rule_list);
+ adapter->nfc_rule_count = 0;
+
spin_lock_init(&adapter->stats64_lock);
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGC_FLAG_HAS_MSIX;
- adapter->mac_table = kzalloc(size, GFP_ATOMIC);
- if (!adapter->mac_table)
- return -ENOMEM;
-
igc_init_queue_configuration(adapter);
/* This call may decrease the number of queues */
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -3359,8 +3701,6 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc511 += rd32(IGC_PRC511);
adapter->stats.prc1023 += rd32(IGC_PRC1023);
adapter->stats.prc1522 += rd32(IGC_PRC1522);
- adapter->stats.symerrs += rd32(IGC_SYMERRS);
- adapter->stats.sec += rd32(IGC_SEC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
@@ -3399,6 +3739,7 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.tpt += rd32(IGC_TPT);
adapter->stats.colc += rd32(IGC_COLC);
+ adapter->stats.colc += rd32(IGC_RERC);
adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
@@ -3449,21 +3790,6 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
-static void igc_nfc_filter_exit(struct igc_adapter *adapter)
-{
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
-
- hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
- igc_erase_filter(adapter, rule);
-
- hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
- igc_erase_filter(adapter, rule);
-
- spin_unlock(&adapter->nfc_lock);
-}
-
/**
* igc_down - Close the interface
* @adapter: board private structure
@@ -3482,8 +3808,6 @@ void igc_down(struct igc_adapter *adapter)
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* flush and sleep below */
- igc_nfc_filter_exit(adapter);
-
/* set trans_start so we don't get spurious watchdogs during reset */
netif_trans_update(netdev);
@@ -3577,8 +3901,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igc_down(adapter);
- netdev_dbg(netdev, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3633,20 +3956,8 @@ static int igc_set_features(struct net_device *netdev,
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
- if (!(features & NETIF_F_NTUPLE)) {
- struct hlist_node *node2;
- struct igc_nfc_filter *rule;
-
- spin_lock(&adapter->nfc_lock);
- hlist_for_each_entry_safe(rule, node2,
- &adapter->nfc_filter_list, nfc_node) {
- igc_erase_filter(adapter, rule);
- hlist_del(&rule->nfc_node);
- kfree(rule);
- }
- spin_unlock(&adapter->nfc_lock);
- adapter->nfc_filter_count = 0;
- }
+ if (!(features & NETIF_F_NTUPLE))
+ igc_flush_nfc_rules(adapter);
netdev->features = features;
@@ -3689,106 +4000,6 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
- * 'flags' is used to indicate what kind of match is made, match is by
- * default for the destination address, if matching by source address
- * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for the first empty entry in the MAC table.
- * Do not touch entries at the end of the table reserved for the VF MAC
- * addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
- addr, flags))
- continue;
-
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
- adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
-
- igc_rar_set_index(adapter, i);
- return i;
- }
-
- return -ENOSPC;
-}
-
-int igc_add_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_add_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
-/* Remove a MAC filter for 'addr' directing matching traffic to
- * 'queue', 'flags' is used to indicate what kind of match need to be
- * removed, match is by default for the destination address, if
- * matching by source address is to be removed the flag
- * IGC_MAC_STATE_SRC_ADDR can be used.
- */
-static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
- const u8 *addr, const u8 queue,
- const u8 flags)
-{
- struct igc_hw *hw = &adapter->hw;
- int rar_entries = hw->mac.rar_entry_count;
- int i;
-
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* Search for matching entry in the MAC table based on given address
- * and queue. Do not touch entries at the end of the table reserved
- * for the VF MAC addresses.
- */
- for (i = 0; i < rar_entries; i++) {
- if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
- continue;
- if ((adapter->mac_table[i].state & flags) != flags)
- continue;
- if (adapter->mac_table[i].queue != queue)
- continue;
- if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
- continue;
-
- /* When a filter for the default address is "deleted",
- * we return it to its initial configuration
- */
- if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
- adapter->mac_table[i].state =
- IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
- } else {
- adapter->mac_table[i].state = 0;
- adapter->mac_table[i].queue = 0;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- }
-
- igc_rar_set_index(adapter, i);
- return 0;
- }
-
- return -ENOENT;
-}
-
-int igc_del_mac_steering_filter(struct igc_adapter *adapter,
- const u8 *addr, u8 queue, u8 flags)
-{
- return igc_del_mac_filter_flags(adapter, addr, queue,
- IGC_MAC_STATE_QUEUE_STEERING | flags);
-}
-
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -4009,7 +4220,6 @@ static void igc_watchdog_task(struct work_struct *work)
struct igc_hw *hw = &adapter->hw;
struct igc_phy_info *phy = &hw->phy;
u16 phy_data, retry_count = 20;
- u32 connsw;
u32 link;
int i;
@@ -4022,14 +4232,6 @@ static void igc_watchdog_task(struct work_struct *work)
link = false;
}
- /* Force link down if we have fiber to swap to */
- if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
- if (hw->phy.media_type == igc_media_type_copper) {
- connsw = rd32(IGC_CONNSW);
- if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
- link = 0;
- }
- }
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4044,8 +4246,7 @@ static void igc_watchdog_task(struct work_struct *work)
ctrl = rd32(IGC_CTRL);
/* Link status message must follow this format */
netdev_info(netdev,
- "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- netdev->name,
+ "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
"Full" : "Half",
@@ -4083,10 +4284,10 @@ retry_read_status:
retry_count--;
goto retry_read_status;
} else if (!retry_count) {
- dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
+ netdev_err(netdev, "exceeded the 2 second retry limit\n");
}
} else {
- dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
+ netdev_err(netdev, "Failed to read 1000Base-T Status Reg\n");
}
no_wait:
netif_carrier_on(netdev);
@@ -4102,8 +4303,7 @@ no_wait:
adapter->link_duplex = 0;
/* Links status message must follow this format */
- netdev_info(netdev, "igc: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
/* link state has changed, schedule phy info update */
@@ -4321,8 +4521,7 @@ static int igc_request_irq(struct igc_adapter *adapter)
netdev->name, adapter);
if (err)
- dev_err(&pdev->dev, "Error %d getting interrupt\n",
- err);
+ netdev_err(netdev, "Error %d getting interrupt\n", err);
request_done:
return err;
@@ -4424,7 +4623,7 @@ err_setup_tx:
return err;
}
-static int igc_open(struct net_device *netdev)
+int igc_open(struct net_device *netdev)
{
return __igc_open(netdev, false);
}
@@ -4466,7 +4665,7 @@ static int __igc_close(struct net_device *netdev, bool suspending)
return 0;
}
-static int igc_close(struct net_device *netdev)
+int igc_close(struct net_device *netdev)
{
if (netif_device_present(netdev) || netdev->dismantle)
return __igc_close(netdev, false);
@@ -4491,6 +4690,158 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igc_ring *ring;
+ int i;
+
+ if (queue < 0 || queue >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ if (adapter->base_time)
+ return 0;
+
+ adapter->cycle_time = NSEC_PER_SEC;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = adapter->tx_ring[i];
+ ring->start_time = 0;
+ ring->end_time = NSEC_PER_SEC;
+ }
+
+ return 0;
+}
+
+static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+{
+ int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ size_t n;
+
+ if (qopt->cycle_time_extension)
+ return false;
+
+ for (n = 0; n < qopt->num_entries; n++) {
+ const struct tc_taprio_sched_entry *e;
+ int i;
+
+ e = &qopt->entries[n];
+
+ /* i225 only supports "global" frame preemption
+ * settings.
+ */
+ if (e->command != TC_TAPRIO_CMD_SET_GATES)
+ return false;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ if (e->gate_mask & BIT(i))
+ queue_uses[i]++;
+
+ if (queue_uses[i] > 1)
+ return false;
+ }
+ }
+
+ return true;
+}
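
validate_schedule() above rejects any taprio schedule in which a queue's gate opens in more than one entry, since the hardware is programmed with exactly one start/end window per Tx queue. A minimal userspace sketch of the same counting check, with a mock struct standing in for the kernel's tc_taprio_sched_entry:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_TX_QUEUES 4

struct mock_entry {
	unsigned int gate_mask;	/* bit i set: queue i's gate is open */
};

static bool gates_open_at_most_once(const struct mock_entry *e, size_t n)
{
	int uses[MAX_TX_QUEUES] = { 0 };

	for (size_t k = 0; k < n; k++)
		for (int i = 0; i < MAX_TX_QUEUES; i++)
			if ((e[k].gate_mask & (1u << i)) && ++uses[i] > 1)
				return false;
	return true;
}

int main(void)
{
	struct mock_entry ok[]  = { { 0x1 }, { 0x2 }, { 0xc } };
	struct mock_entry bad[] = { { 0x1 }, { 0x3 } };	/* queue 0 opens twice */

	printf("ok=%d bad=%d\n",
	       gates_open_at_most_once(ok, 3),		/* ok=1 */
	       gates_open_at_most_once(bad, 2));	/* bad=0 */
	return 0;
}
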
+
+static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_save_qbv_schedule(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 start_time = 0, end_time = 0;
+ size_t n;
+
+ if (!qopt->enable) {
+ adapter->base_time = 0;
+ return 0;
+ }
+
+ if (adapter->base_time)
+ return -EALREADY;
+
+ if (!validate_schedule(qopt))
+ return -EINVAL;
+
+ adapter->cycle_time = qopt->cycle_time;
+ adapter->base_time = qopt->base_time;
+
+ /* FIXME: be a little smarter about cases when the gate for a
+ * queue stays open for more than one entry.
+ */
+ for (n = 0; n < qopt->num_entries; n++) {
+ struct tc_taprio_sched_entry *e = &qopt->entries[n];
+ int i;
+
+ end_time += e->interval;
+
+ for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (!(e->gate_mask & BIT(i)))
+ continue;
+
+ ring->start_time = start_time;
+ ring->end_time = end_time;
+ }
+
+ start_time += e->interval;
+ }
+
+ return 0;
+}
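
The loop above flattens the list of taprio entries into one [start_time, end_time) window per queue by accumulating intervals. A worked example in plain C, assuming (hypothetically) a 1 ms cycle with queue 0 open for the first 300 us and queues 1-3 open for the remainder:

#include <stdio.h>

int main(void)
{
	/* entry 0: 300000 ns, gate_mask 0x1; entry 1: 700000 ns, 0xe */
	unsigned int intervals[] = { 300000, 700000 };
	unsigned int gates[] = { 0x1, 0xe };
	unsigned int start = 0, end = 0;

	for (int n = 0; n < 2; n++) {
		end += intervals[n];
		for (int i = 0; i < 4; i++)
			if (gates[n] & (1u << i))
				printf("queue %d: [%u, %u)\n", i, start, end);
		start += intervals[n];
	}
	/* queue 0: [0, 300000); queues 1-3: [300000, 1000000) */
	return 0;
}
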
+
+static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != igc_i225)
+ return -EOPNOTSUPP;
+
+ err = igc_save_qbv_schedule(adapter, qopt);
+ if (err)
+ return err;
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return igc_tsn_enable_qbv_scheduling(adapter, type_data);
+
+ case TC_SETUP_QDISC_ETF:
+ return igc_tsn_enable_launchtime(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4503,6 +4854,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
+ .ndo_setup_tc = igc_setup_tc,
};
/* PCIe configuration access */
@@ -4550,9 +4902,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
- if (IGC_REMOVED(hw_addr))
- return ~value;
-
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -4571,7 +4920,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
- struct pci_dev *pdev = adapter->pdev;
struct igc_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;
@@ -4616,7 +4964,7 @@ int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
return 0;
err_inval:
- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4697,7 +5045,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
- igc_set_ethtool_ops(netdev);
+ igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
@@ -4723,9 +5071,21 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
+ netdev->features |= NETIF_F_TSO_ECN;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_HW_TC;
+
+#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
/* setup the private structure */
err = igc_sw_init(adapter);
@@ -4750,8 +5110,7 @@ static int igc_probe(struct pci_dev *pdev,
if (igc_get_flash_presence_i225(hw)) {
if (hw->nvm.ops.validate(hw) < 0) {
- dev_err(&pdev->dev,
- "The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -4865,6 +5224,8 @@ static void igc_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
+ igc_flush_nfc_rules(adapter);
+
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4885,7 +5246,6 @@ static void igc_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adapter->io_addr);
pci_release_mem_regions(pdev);
- kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
@@ -5014,8 +5374,7 @@ static int __maybe_unused igc_resume(struct device *dev)
return -ENODEV;
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "igc: Cannot enable PCI device from suspend\n");
+ netdev_err(netdev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -5024,7 +5383,7 @@ static int __maybe_unused igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3cold, 0);
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -5128,8 +5487,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
- dev_err(&pdev->dev,
- "Could not re-enable PCI device after reset.\n");
+ netdev_err(netdev, "Could not re-enable PCI device after reset\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
@@ -5168,7 +5526,7 @@ static void igc_io_resume(struct pci_dev *pdev)
rtnl_lock();
if (netif_running(netdev)) {
if (igc_open(netdev)) {
- dev_err(&pdev->dev, "igc_open failed after reset\n");
+ netdev_err(netdev, "igc_open failed after reset\n");
return;
}
}
@@ -5215,7 +5573,6 @@ static struct pci_driver igc_driver = {
int igc_reinit_queues(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
if (netif_running(netdev))
@@ -5224,7 +5581,7 @@ int igc_reinit_queues(struct igc_adapter *adapter)
igc_reset_interrupt_capability(adapter);
if (igc_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index f99c514ad0f4..0d746f8588c8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -305,7 +305,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
u32 tsync_rx_cfg = 0;
bool is_l4 = false;
- bool is_l2 = false;
u32 regval;
/* reserved for future extensions */
@@ -346,7 +345,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -370,7 +368,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- is_l2 = true;
is_l4 = true;
if (hw->mac.type == igc_i225) {
@@ -405,15 +402,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
/* define which PTP packets are time stamped */
wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- wr32(IGC_ETQF(3),
- (IGC_ETQF_FILTER_ENABLE | /* enable filter */
- IGC_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- wr32(IGC_ETQF(3), 0);
-
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
@@ -466,7 +454,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
* interrupt
*/
rd32(IGC_TXSTMPH);
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
}
}
@@ -529,7 +517,7 @@ static void igc_ptp_tx_work(struct work_struct *work)
* interrupt
*/
rd32(IGC_TXSTMPH);
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
+ netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
return;
}
@@ -626,10 +614,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
- dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+ netdev_err(netdev, "ptp_clock_register failed\n");
} else if (adapter->ptp_clock) {
- dev_info(&adapter->pdev->dev, "added PHC on %s\n",
- adapter->netdev->name);
+ netdev_info(netdev, "PHC added\n");
adapter->ptp_flags |= IGC_PTP_ENABLED;
}
}
@@ -666,8 +653,7 @@ void igc_ptp_stop(struct igc_adapter *adapter)
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
- dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
- adapter->netdev->name);
+ netdev_info(adapter->netdev, "PHC removed\n");
adapter->ptp_flags &= ~IGC_PTP_ENABLED;
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d4af53a80f11..232e82dec62e 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -17,11 +17,6 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
-#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
@@ -35,10 +30,6 @@
#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
-#define IGC_FCSTS 0x02464 /* FC Status - RO */
-
-/* PCIe Register Description */
-#define IGC_GCR 0x05B00 /* PCIe control- RW */
/* Semaphore registers */
#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
@@ -49,6 +40,7 @@
#define IGC_FACTPS 0x05B30
/* Interrupt Register Description */
+#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */
#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
@@ -76,13 +68,6 @@
#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
-#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
-#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
-#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
-#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
-#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */
-#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */
-
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
@@ -119,10 +104,11 @@
#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define IGC_RA 0x05400 /* Receive Address - RW Array */
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
-#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
+#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
@@ -138,13 +124,9 @@
#define IGC_MMDAC 13 /* MMD Access Control */
#define IGC_MMDAAD 14 /* MMD Access Address/Data */
-/* Good transmitted packets counter registers */
-#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
-
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
@@ -152,10 +134,10 @@
#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define IGC_COLC 0x04028 /* Collision Count - R/clr */
+#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */
#define IGC_DC 0x04030 /* Defer Count - R/clr */
#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
-#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */
#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
@@ -213,7 +195,6 @@
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
-#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
/* Time sync registers */
#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */
@@ -229,7 +210,17 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
-#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+/* Transmit Scheduling Registers */
+#define IGC_TQAVCTRL 0x3570
+#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_BASET_L 0x3314
+#define IGC_BASET_H 0x3318
+#define IGC_QBVCYCLET 0x331C
+#define IGC_QBVCYCLET_S 0x3320
+
+#define IGC_STQT(_n) (0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n))
+#define IGC_DTXMXPKTSZ 0x355C
/* System Time Registers */
#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */
@@ -265,8 +256,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- if (!IGC_REMOVED(hw_addr)) \
- writel((val), &hw_addr[(reg)]); \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
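
With the per-access IGC_REMOVED() guard dropped from wr32 and igc_rd32, a surprise-removed adapter is caught the way the "reads should not return all F's" comment in igc_rd32() hints: MMIO reads of a detached PCIe device complete as all-ones. A trivial sketch of that detection pattern:

#include <stdint.h>
#include <stdio.h>

static void check_read(uint32_t value)
{
	/* reads should not return all F's; if they do, the device is gone */
	if (value == ~0u)
		printf("adapter removed, schedule teardown\n");
	else
		printf("read 0x%08x\n", value);
}

int main(void)
{
	check_read(0x00000042);		/* healthy register read */
	check_read(0xffffffff);		/* detached device */
	return 0;
}
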
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
new file mode 100644
index 000000000000..174103c4bea6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Intel Corporation */
+
+#include "igc.h"
+#include "igc_tsn.h"
+
+static bool is_any_launchtime(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ if (ring->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
+/* Returns the TSN specific registers to their default values after
+ * TSN offloading is disabled.
+ */
+static int igc_tsn_disable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl;
+ int i;
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
+ return 0;
+
+ adapter->cycle_time = 0;
+
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ IGC_TQAVCTRL_ENHANCED_QAV);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = 0;
+ ring->end_time = 0;
+ ring->launchtime_enable = false;
+
+ wr32(IGC_TXQCTL(i), 0);
+ wr32(IGC_STQT(i), 0);
+ wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+ }
+
+ wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
+ wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
+
+ adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
+
+static int igc_tsn_enable_offload(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tqavctrl, baset_l, baset_h;
+ u32 sec, nsec, cycle;
+ ktime_t base_time, systim;
+ int i;
+
+ if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
+ return 0;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
+ wr32(IGC_TSAUXC, 0);
+ wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+ tqavctrl = rd32(IGC_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txqctl = 0;
+
+ wr32(IGC_STQT(i), ring->start_time);
+ wr32(IGC_ENDQT(i), ring->end_time);
+
+ if (adapter->base_time) {
+ /* If we have a base_time we are in "taprio"
+ * mode and we need to be strict about the
+ * cycles: only transmit a packet if it can be
+ * completed during that cycle.
+ */
+ txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+ IGC_TXQCTL_STRICT_END;
+ }
+
+ if (ring->launchtime_enable)
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+
+ wr32(IGC_TXQCTL(i), txqctl);
+ }
+
+ nsec = rd32(IGC_SYSTIML);
+ sec = rd32(IGC_SYSTIMH);
+
+ systim = ktime_set(sec, nsec);
+
+ if (ktime_compare(systim, base_time) > 0) {
+ s64 n;
+
+ n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
+ base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+ }
+
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+
+ wr32(IGC_BASET_H, baset_h);
+ wr32(IGC_BASET_L, baset_l);
+
+ adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;
+
+ return 0;
+}
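
The div64_s64() step above advances a base_time that already lies in the past to the next cycle boundary, so the first programmed cycle always starts in the future. The same arithmetic as a standalone sketch, with nanosecond values chosen purely for illustration:

#include <stdio.h>

int main(void)
{
	long long base_time = 1000000000LL;	/* 1 s, already in the past */
	long long systim = 3250000000LL;	/* current PHC time: 3.25 s */
	long long cycle = 500000000LL;		/* 0.5 s cycle */

	if (systim > base_time) {
		long long n = (systim - base_time) / cycle;	/* 4 full cycles */

		base_time += (n + 1) * cycle;	/* next boundary: 3.5 s */
	}
	printf("first cycle starts at %lld ns\n", base_time);	/* 3500000000 */
	return 0;
}
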
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);
+
+ if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
+ return 0;
+
+ if (!is_any_enabled) {
+ int err = igc_tsn_disable_offload(adapter);
+
+ if (err < 0)
+ return err;
+
+ /* The BASET registers aren't cleared by writing
+ * into them, so force a reset if the interface is
+ * running.
+ */
+ if (netif_running(adapter->netdev))
+ schedule_work(&adapter->reset_task);
+
+ return 0;
+ }
+
+ return igc_tsn_enable_offload(adapter);
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
new file mode 100644
index 000000000000..f76bc86ddccd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+#ifndef _IGC_TSN_H_
+#define _IGC_TSN_H_
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter);
+
+#endif /* _IGC_TSN_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 2833e4f041ce..5ddfc83a1e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -224,17 +224,17 @@ struct ixgbe_tx_buffer {
};
struct ixgbe_rx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
union {
struct {
+ struct sk_buff *skb;
+ dma_addr_t dma;
struct page *page;
__u32 page_offset;
__u16 pagecnt_bias;
};
struct {
- void *addr;
- u64 handle;
+ bool discard;
+ struct xdp_buff *xdp;
};
};
};
@@ -351,7 +351,6 @@ struct ixgbe_ring {
};
struct xdp_rxq_info xdp_rxq;
struct xdp_umem *xsk_umem;
- struct zero_copy_allocator zca; /* ZC allocator anchor */
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0bd1294ba517..17357a12cbdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -64,8 +64,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* if link is down, assume supported */
if (link_up)
- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
- true : false;
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL;
else
supported = true;
}
@@ -2243,7 +2242,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
+ reg = hw->fc.pause_time * 0x00010001U;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
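
Multiplying by 0x00010001 replicates the 16-bit pause time into both halves of the FCTTV register (one traffic class per half). The new U suffix matters: with pause_time == 0xFFFF the signed-int product would overflow, which is undefined behavior, while the unsigned multiply wraps to the intended value. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pause_time = 0xFFFF;	/* default flow-control pause time */
	uint32_t reg = pause_time * 0x00010001U; /* low and high 16 bits */

	printf("FCTTV = 0x%08X\n", reg);	/* 0xFFFFFFFF */
	return 0;
}
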
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 718931d951bc..f162b8b8f345 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -35,7 +35,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
#include "ixgbe.h"
@@ -2215,7 +2215,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
@@ -2244,19 +2244,30 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
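
This truesize doubles as xdp.frame_sz further down: on 4 KiB pages each Rx buffer is a fixed half page, while on larger pages it is sized from the actual frame plus headroom and the shared-info tail. A userspace sketch of the two branches, with IXGBE_SKB_PAD and the skb_shared_info size as assumed stand-in constants:

#include <stdio.h>

#define CACHE_ALIGN(x)	(((x) + 63u) & ~63u)	/* SKB_DATA_ALIGN, 64 B lines */
#define SKB_PAD		192u			/* stand-in for IXGBE_SKB_PAD */
#define SHINFO		320u			/* stand-in for skb_shared_info */

int main(void)
{
	unsigned int size = 1500;

	/* PAGE_SIZE < 8192: half of a (possibly compound) page */
	printf("4K page:  truesize = %u\n", 4096u / 2);

	/* PAGE_SIZE >= 8192, build_skb path: headroom + data + shinfo */
	printf("64K page: truesize = %u\n",
	       CACHE_ALIGN(SKB_PAD + size) + CACHE_ALIGN(SHINFO));
	return 0;
}
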
+
static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
unsigned int size)
{
+ unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
-
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -2290,6 +2301,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
@@ -2323,7 +2339,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbe_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
@@ -2954,35 +2973,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
/* skip the flush */
}
-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
-{
- u32 mask;
- struct ixgbe_hw *hw = &adapter->hw;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- mask = (qmask & 0xFFFFFFFF);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
- mask = (qmask >> 32);
- if (mask)
- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
- break;
- default:
- break;
- }
- /* skip the flush */
-}
-
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
@@ -3726,8 +3716,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
/* configure the packet buffer length */
if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4074,11 +4063,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
if (ring->xsk_umem) {
- ring->zca.free = ixgbe_zca_free;
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &ring->zca));
-
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4134,8 +4122,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
}
if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
- XDP_PACKET_HEADROOM;
+ u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 537dfff585e0..d05a5690e66b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -102,7 +102,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
* indirection table and RSS hash key with PF therefore
* we want to disable the querying by default.
*/
- adapter->vfinfo[i].rss_query_enabled = 0;
+ adapter->vfinfo[i].rss_query_enabled = false;
/* Untrust all VFs */
adapter->vfinfo[i].trusted = false;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 6d01700b46bc..7887ae4aaf4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -35,7 +35,7 @@ int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 74b540ebb3dc..be9d2a8da515 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -2,7 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ixgbe.h"
@@ -20,54 +20,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
return xdp_get_umem_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem)
-{
- struct device *dev = &adapter->pdev->dev;
- unsigned int i, j;
- dma_addr_t dma;
-
- for (i = 0; i < umem->npgs; i++) {
- dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
- if (dma_mapping_error(dev, dma))
- goto out_unmap;
-
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-out_unmap:
- for (j = 0; j < i; j++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
- umem->pages[i].dma = 0;
- }
-
- return -1;
-}
-
-static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem)
-{
- struct device *dev = &adapter->pdev->dev;
- unsigned int i;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
-
- umem->pages[i].dma = 0;
- }
-}
-
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
struct xdp_umem *umem,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
- struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -78,13 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
- if (!reuseq)
- return -ENOMEM;
-
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- err = ixgbe_xsk_umem_dma_map(adapter, umem);
+ err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -124,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- ixgbe_xsk_umem_dma_unmap(adapter, umem);
+ xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -143,25 +94,20 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
- struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
- u64 offset;
u32 act;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- offset = xdp->data - xdp->data_hard_start;
-
- xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf)) {
result = IXGBE_XDP_CONSUMED;
break;
@@ -186,140 +132,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
return result;
}
-static struct
-ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
- unsigned int size)
-{
- struct ixgbe_rx_buffer *bi;
-
- bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- bi->dma, 0,
- size,
- DMA_BIDIRECTIONAL);
-
- return bi;
-}
-
-static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *obi)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ixgbe_rx_buffer *nbi;
-
- nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- nbi->dma = obi->dma;
- nbi->addr = obi->addr;
- nbi->handle = obi->handle;
-
- obi->addr = NULL;
- obi->skb = NULL;
-}
-
-void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
-{
- struct ixgbe_rx_buffer *bi;
- struct ixgbe_ring *rx_ring;
- u64 hr, mask;
- u16 nta;
-
- rx_ring = container_of(alloc, struct ixgbe_ring, zca);
- hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
- mask = rx_ring->xsk_umem->chunk_mask;
-
- nta = rx_ring->next_to_alloc;
- bi = rx_ring->rx_buffer_info;
-
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- handle &= mask;
-
- bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
- rx_ring->xsk_umem->headroom);
-}
-
-static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- void *addr = bi->addr;
- u64 handle, hr;
-
- if (addr)
- return true;
-
- if (!xsk_umem_peek_addr(umem, &handle)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr(umem);
- return true;
-}
-
-static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
-{
- struct xdp_umem *umem = rx_ring->xsk_umem;
- u64 handle, hr;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
- return false;
- }
-
- handle &= rx_ring->xsk_umem->chunk_mask;
-
- hr = umem->headroom + XDP_PACKET_HEADROOM;
-
- bi->dma = xdp_umem_get_dma(umem, handle);
- bi->dma += hr;
-
- bi->addr = xdp_umem_get_data(umem, handle);
- bi->addr += hr;
-
- bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
-
- xsk_umem_release_addr_rq(umem);
- return true;
-}
-
-static __always_inline bool
-__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
- bool alloc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi))
+bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ dma_addr_t dma;
bool ok = true;
/* nothing to do */
- if (!cleaned_count)
+ if (!count)
return true;
rx_desc = IXGBE_RX_DESC(rx_ring, i);
@@ -327,21 +149,18 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
i -= rx_ring->count;
do {
- if (!alloc(rx_ring, bi)) {
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ if (!bi->xdp) {
ok = false;
break;
}
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_xdp_get_dma(bi->xdp);
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc++;
bi++;
@@ -355,17 +174,14 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
/* clear the length for the next_to_use descriptor */
rx_desc->wb.upper.length = 0;
- cleaned_count--;
- } while (cleaned_count);
+ count--;
+ } while (count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = i;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -378,40 +194,27 @@ __ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
return ok;
}
-void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
-{
- __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
- ixgbe_alloc_buffer_slow_zc);
-}
-
-static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
- u16 count)
-{
- return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
- ixgbe_alloc_buffer_zc);
-}
-
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi,
- struct xdp_buff *xdp)
+ struct ixgbe_rx_buffer *bi)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
+ unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
struct sk_buff *skb;
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ bi->xdp->data_end - bi->xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
return skb;
}
@@ -434,9 +237,6 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
struct sk_buff *skb;
- struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -446,8 +246,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
failure = failure ||
- !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
- cleaned_count);
+ !ixgbe_alloc_rx_buffers_zc(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -462,42 +262,40 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+ bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
if (unlikely(!ixgbe_test_staterr(rx_desc,
IXGBE_RXD_STAT_EOP))) {
struct ixgbe_rx_buffer *next_bi;
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
ixgbe_inc_ntc(rx_ring);
next_bi =
&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- next_bi->skb = ERR_PTR(-EINVAL);
+ next_bi->discard = true;
continue;
}
- if (unlikely(bi->skb)) {
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ if (unlikely(bi->discard)) {
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+ bi->discard = false;
ixgbe_inc_ntc(rx_ring);
continue;
}
- xdp.data = bi->addr;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + size;
- xdp.handle = bi->handle;
-
- xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+ bi->xdp->data_end = bi->xdp->data + size;
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
xdp_xmit |= xdp_res;
- bi->addr = NULL;
- bi->skb = NULL;
- } else {
- ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
- }
+ else
+ xsk_buff_free(bi->xdp);
+
+ bi->xdp = NULL;
total_rx_packets++;
total_rx_bytes += size;
@@ -507,7 +305,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
@@ -559,17 +357,17 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- u16 i = rx_ring->next_to_clean;
- struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+ struct ixgbe_rx_buffer *bi;
+ u16 i;
- while (i != rx_ring->next_to_alloc) {
- xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
- i++;
- bi++;
- if (i == rx_ring->count) {
- i = 0;
- bi = rx_ring->rx_buffer_info;
- }
+ for (i = 0; i < rx_ring->count; i++) {
+ bi = &rx_ring->rx_buffer_info[i];
+
+ if (!bi->xdp)
+ continue;
+
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
}
}
@@ -592,10 +390,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
break;
- dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
-
- dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
- DMA_BIDIRECTIONAL);
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4622c4ea2e46..a39e2cb384dd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1095,19 +1095,31 @@ xdp_out:
return ERR_PTR(-result);
}
+static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
- SKB_DATA_ALIGN(size);
-
rx_buffer->page_offset += truesize;
#endif
}
@@ -1125,6 +1137,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.rxq = &rx_ring->xdp_rxq;
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+#endif
+
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
@@ -1157,7 +1174,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
xdp.data_hard_start = xdp.data -
ixgbevf_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
-
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
+#endif
skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 900affbdcc0e..1645e4e7ebdb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -276,7 +276,8 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
return pkts;
}
-static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct xrx200_priv *priv = netdev_priv(net_dev);
struct xrx200_chan *ch = &priv->chan_tx;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81d24481b22c..4d4b6243318a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -666,11 +666,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
u16 *l4i_chk, u32 *command, int length)
{
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 51889770958d..011cd26953d9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -324,7 +324,8 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
-#define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
+/* Driver assumes that the last 3 bits are 0 */
+#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
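
The new & ~0x7 rounds the headroom down to a multiple of 8 so the buffer pointer's low three bits stay clear, per the comment above. With XDP_PACKET_HEADROOM at 256 this is a no-op, but it guards against arch-specific NET_SKB_PAD values that are not 8-byte aligned. A small check, assuming NET_SKB_PAD of 32:

#include <stdio.h>

#define XDP_PACKET_HEADROOM 256u	/* from include/uapi/linux/bpf.h */
#define NET_SKB_PAD 32u			/* arch-dependent; assumed here */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* low 3 bits of the headroom must be 0 */
	printf("headroom = %u\n",
	       MAX(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7u);	/* 256 */
	return 0;
}
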
@@ -2072,7 +2073,7 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
int cpu;
u32 ret;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return MVNETA_XDP_DROPPED;
@@ -2148,12 +2149,17 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
struct mvneta_stats *stats)
{
- unsigned int len;
+ unsigned int len, sync;
+ struct page *page;
u32 ret, act;
len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail, the for_device DMA sync must cover the max length the CPU touched */
+ sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
stats->xdp_pass++;
@@ -2164,9 +2170,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
} else {
ret = MVNETA_XDP_REDIR;
stats->xdp_redirect++;
@@ -2175,10 +2180,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
- if (ret != MVNETA_XDP_TX)
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != MVNETA_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ }
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2187,8 +2192,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
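
The sync length handed to page_pool_put_page() is now max(len before BPF, len after BPF): bpf_xdp_adjust_tail() may shrink data_end, but the CPU may have dirtied cache lines up to the original length, so the for-device sync on recycle has to cover the larger of the two. A minimal illustration:

#include <stdio.h>

int main(void)
{
	unsigned int len = 1500;	/* frame length entering the BPF program */
	unsigned int after = 900;	/* bpf_xdp_adjust_tail() shrank the frame */
	unsigned int sync = after > len ? after : len;

	/* recycle with a DMA sync covering every byte the CPU touched */
	printf("sync %u bytes back to the device\n", sync);
	return 0;
}
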
@@ -2320,6 +2325,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rcu_read_lock();
xdp_prog = READ_ONCE(pp->xdp_prog);
xdp_buf.rxq = &rxq->xdp_rxq;
+ xdp_buf.frame_sz = PAGE_SIZE;
/* Fairness NAPI loop */
while (rx_proc < budget && rx_proc < rx_todo) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..5975521a4c86 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -379,40 +379,35 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp)
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
{
dma_addr_t iova;
+ u8 *buf;
- /* Check if request can be accommodated in previous allocated page */
- if (pool->page && ((pool->page_offset + pool->rbsize) <=
- (PAGE_SIZE << pool->rbpage_order))) {
- pool->pageref++;
- goto ret;
- }
-
- otx2_get_page(pool);
-
- /* Allocate a new page */
- pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- pool->rbpage_order);
- if (unlikely(!pool->page))
+ buf = napi_alloc_frag(pool->rbsize);
+ if (unlikely(!buf))
return -ENOMEM;
- pool->page_offset = 0;
-ret:
- iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
- pool->rbsize, DMA_FROM_DEVICE);
- if (!iova) {
- if (!pool->page_offset)
- __free_pages(pool->page, pool->rbpage_order);
- pool->page = NULL;
+ iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ page_frag_free(buf);
return -ENOMEM;
}
- pool->page_offset += pool->rbsize;
+
return iova;
}
+static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+{
+ dma_addr_t addr;
+
+ local_bh_disable();
+ addr = __otx2_alloc_rbuf(pfvf, pool);
+ local_bh_enable();
+ return addr;
+}
+
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -805,7 +800,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, rbpool);
if (bufptr <= 0) {
/* Schedule a WQ if we fail to free at least half of the
* pointers else enable napi for this RQ.
@@ -1064,7 +1059,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
return err;
pool->rbsize = buf_size;
- pool->rbpage_order = get_order(buf_size);
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
@@ -1152,13 +1146,12 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
- otx2_get_page(pool);
}
return 0;
@@ -1204,13 +1197,12 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
+ bufptr = otx2_alloc_rbuf(pfvf, pool);
if (bufptr <= 0)
return bufptr;
otx2_aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
- otx2_get_page(pool);
}
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 018c283a0ac4..2fa29889522e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -309,7 +309,7 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
default:
blkaddr = BLKADDR_RVUM;
break;
- };
+ }
offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
@@ -434,18 +434,6 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
-/* Update page ref count */
-static inline void otx2_get_page(struct otx2_pool *pool)
-{
- if (!pool->page)
- return;
-
- if (pool->pageref)
- page_ref_add(pool->page, pool->pageref);
- pool->pageref = 0;
- pool->page = NULL;
-}
-
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
if (type == AURA_NIX_SQ)
@@ -589,8 +577,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- gfp_t gfp);
+dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 411e5ea1031e..64786568af0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1856,13 +1856,17 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
/* Map CSRs */
pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b..b04f5429d72d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -286,7 +286,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
+ bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
@@ -304,7 +304,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
- otx2_get_page(cq->rbpool);
return processed_cqe;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb78..da97f2d4416f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -113,11 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u8 rbpage_order;
u16 rbsize;
- u32 page_offset;
- u16 pageref;
- struct page *page;
};
struct otx2_cq_queue {
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 4968352ba188..500c15e7ea4a 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
- bool "MediaTek ethernet driver"
+ bool "MediaTek devices"
depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
---help---
If you have a Mediatek SoC with ethernet, say Y.
@@ -14,4 +14,11 @@ config NET_MEDIATEK_SOC
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
+config NET_MEDIATEK_STAR_EMAC
+ tristate "MediaTek STAR Ethernet MAC support"
+ select PHYLIB
+ help
+ This driver supports the ethernet MAC IP first used on
+ MediaTek MT85** SoCs.
+
endif #NET_VENDOR_MEDIATEK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 2d8362f9341b..3a777b4a6cd3 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -3,5 +3,6 @@
# Makefile for the Mediatek SoCs built-in ethernet macs
#
-obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o
+obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 09047109d0da..f6a1f8666f95 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -65,7 +65,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
u32 val;
@@ -1122,7 +1122,7 @@ static void mtk_stop_queue(struct mtk_eth *eth)
}
}
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
new file mode 100644
index 000000000000..f1ace4fec19f
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -0,0 +1,1651 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 MediaTek Corporation
+ * Copyright (c) 2020 BayLibre SAS
+ *
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define MTK_STAR_DRVNAME "mtk_star_emac"
+
+#define MTK_STAR_WAIT_TIMEOUT 300
+#define MTK_STAR_MAX_FRAME_SIZE 1514
+#define MTK_STAR_SKB_ALIGNMENT 16
+#define MTK_STAR_NAPI_WEIGHT 64
+#define MTK_STAR_HASHTABLE_MC_LIMIT 256
+#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+
+/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
+ * work for this controller.
+ */
+#define MTK_STAR_IP_ALIGN 2
+
+static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
+#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
+
+/* PHY Control Register 0 */
+#define MTK_STAR_REG_PHY_CTRL0 0x0000
+#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
+#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
+#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
+#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
+#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
+#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
+#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
+
+/* PHY Control Register 1 */
+#define MTK_STAR_REG_PHY_CTRL1 0x0004
+#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
+#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
+#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
+
+/* MAC Configuration Register */
+#define MTK_STAR_REG_MAC_CFG 0x0008
+#define MTK_STAR_OFF_MAC_CFG_IPG 10
+#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
+#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
+#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
+#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
+#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
+#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
+
+/* Flow-Control Configuration Register */
+#define MTK_STAR_REG_FC_CFG 0x000c
+#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
+#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
+#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
+#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
+#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
+
+/* ARL Configuration Register */
+#define MTK_STAR_REG_ARL_CFG 0x0010
+#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
+#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
+
+/* MAC High and Low Bytes Registers */
+#define MTK_STAR_REG_MY_MAC_H 0x0014
+#define MTK_STAR_REG_MY_MAC_L 0x0018
+
+/* Hash Table Control Register */
+#define MTK_STAR_REG_HASH_CTRL 0x001c
+#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
+#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
+#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
+#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
+
+/* TX DMA Control Register */
+#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
+#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
+
+/* RX DMA Control Register */
+#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
+#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
+
+/* DMA Address Registers */
+#define MTK_STAR_REG_TX_DPTR 0x003c
+#define MTK_STAR_REG_RX_DPTR 0x0040
+#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
+#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
+
+/* Interrupt Status Register */
+#define MTK_STAR_REG_INT_STS 0x0050
+#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
+#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
+#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
+#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
+
+/* Interrupt Mask Register */
+#define MTK_STAR_REG_INT_MASK 0x0054
+#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+
+/* Misc. Config Register */
+#define MTK_STAR_REG_TEST1 0x005c
+#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
+
+/* Extended Configuration Register */
+#define MTK_STAR_REG_EXT_CFG 0x0060
+#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
+#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
+#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
+
+/* EthSys Configuration Register */
+#define MTK_STAR_REG_SYS_CONF 0x0094
+#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
+#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
+#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
+
+/* MAC Clock Configuration Register */
+#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
+#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
+#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+
+/* Counter registers. */
+#define MTK_STAR_REG_C_RXOKPKT 0x0100
+#define MTK_STAR_REG_C_RXOKBYTE 0x0104
+#define MTK_STAR_REG_C_RXRUNT 0x0108
+#define MTK_STAR_REG_C_RXLONG 0x010c
+#define MTK_STAR_REG_C_RXDROP 0x0110
+#define MTK_STAR_REG_C_RXCRC 0x0114
+#define MTK_STAR_REG_C_RXARLDROP 0x0118
+#define MTK_STAR_REG_C_RXVLANDROP 0x011c
+#define MTK_STAR_REG_C_RXCSERR 0x0120
+#define MTK_STAR_REG_C_RXPAUSE 0x0124
+#define MTK_STAR_REG_C_TXOKPKT 0x0128
+#define MTK_STAR_REG_C_TXOKBYTE 0x012c
+#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
+#define MTK_STAR_REG_C_TXRTY 0x0134
+#define MTK_STAR_REG_C_TXSKIP 0x0138
+#define MTK_STAR_REG_C_TX_ARP 0x013c
+#define MTK_STAR_REG_C_RX_RERR 0x01d8
+#define MTK_STAR_REG_C_RX_UNI 0x01dc
+#define MTK_STAR_REG_C_RX_MULTI 0x01e0
+#define MTK_STAR_REG_C_RX_BROAD 0x01e4
+#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
+#define MTK_STAR_REG_C_TX_UNI 0x01ec
+#define MTK_STAR_REG_C_TX_MULTI 0x01f0
+#define MTK_STAR_REG_C_TX_BROAD 0x01f4
+#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
+#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
+#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
+#define MTK_STAR_REG_C_RX_TWIST 0x0218
+
+/* Ethernet CFG Control */
+#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
+#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+
+/* Represents the actual structure of descriptors used by the MAC. We can
+ * reuse the same structure for both TX and RX - the layout is the same, only
+ * the flags differ slightly.
+ */
+struct mtk_star_ring_desc {
+ /* Contains both the status flags as well as packet length. */
+ u32 status;
+ u32 data_ptr;
+ u32 vtag;
+ u32 reserved;
+};
+
+#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
+#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
+#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
+#define MTK_STAR_DESC_BIT_INT BIT(27)
+#define MTK_STAR_DESC_BIT_LS BIT(28)
+#define MTK_STAR_DESC_BIT_FS BIT(29)
+#define MTK_STAR_DESC_BIT_EOR BIT(30)
+#define MTK_STAR_DESC_BIT_COWN BIT(31)
+
+/* Helper structure for storing data read from/written to descriptors in order
+ * to limit reads from/writes to DMA memory.
+ */
+struct mtk_star_ring_desc_data {
+ unsigned int len;
+ unsigned int flags;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
+#define MTK_STAR_DMA_SIZE \
+ (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
+
+struct mtk_star_ring {
+ struct mtk_star_ring_desc *descs;
+ struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
+ dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct mtk_star_priv {
+ struct net_device *ndev;
+
+ struct regmap *regs;
+ struct regmap *pericfg;
+
+ struct clk_bulk_data clks[MTK_STAR_NCLKS];
+
+ void *ring_base;
+ struct mtk_star_ring_desc *descs_base;
+ dma_addr_t dma_addr;
+ struct mtk_star_ring tx_ring;
+ struct mtk_star_ring rx_ring;
+
+ struct mii_bus *mii;
+ struct napi_struct napi;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_intf;
+ struct phy_device *phydev;
+ unsigned int link;
+ int speed;
+ int duplex;
+ int pause;
+
+ /* Protects against concurrent descriptor access. */
+ spinlock_t lock;
+
+ struct rtnl_link_stats64 stats;
+ struct work_struct stats_work;
+};
+
+static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
+{
+ return priv->ndev->dev.parent;
+}
+
+static const struct regmap_config mtk_star_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static void mtk_star_ring_init(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc *descs)
+{
+ memset(ring, 0, sizeof(*ring));
+ ring->descs = descs;
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+ dma_rmb(); /* Make sure we read the status bits before checking them. */
+
+ if (!(status & MTK_STAR_DESC_BIT_COWN))
+ return -1;
+
+ desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
+ desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
+ desc_data->dma_addr = ring->dma_addrs[ring->tail];
+ desc_data->skb = ring->skbs[ring->tail];
+
+ ring->dma_addrs[ring->tail] = 0;
+ ring->skbs[ring->tail] = NULL;
+
+ status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;
+
+ WRITE_ONCE(desc->data_ptr, 0);
+ WRITE_ONCE(desc->status, status);
+
+ ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;
+
+ return 0;
+}
+
+static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data,
+ unsigned int flags)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+
+ ring->skbs[ring->head] = desc_data->skb;
+ ring->dma_addrs[ring->head] = desc_data->dma_addr;
+
+ status |= desc_data->len;
+ if (flags)
+ status |= flags;
+
+ WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
+ WRITE_ONCE(desc->status, status);
+ status &= ~MTK_STAR_DESC_BIT_COWN;
+ /* Flush previous modifications before ownership change. */
+ dma_wmb();
+ WRITE_ONCE(desc->status, status);
+
+ ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
+}
+
+static void
+mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ mtk_star_ring_push_head(ring, desc_data, 0);
+}
+
+static void
+mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
+ MTK_STAR_DESC_BIT_LS |
+ MTK_STAR_DESC_BIT_INT;
+
+ mtk_star_ring_push_head(ring, desc_data, flags);
+}
+
+static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+{
+ return abs(ring->head - ring->tail);
+}
+
+static bool mtk_star_ring_full(struct mtk_star_ring *ring)
+{
+ return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
+}
+
+static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
+{
+ return mtk_star_ring_num_used_descs(ring) > 0;
+}
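Editor's aside: the ring helpers above implement a classic single-producer/single-consumer handshake keyed on the COWN ("CPU own") bit: pop_tail refuses descriptors the hardware still owns, and push_head fills every field before flipping COWN after a dma_wmb(), so the DMA engine can never observe a half-written descriptor. The same handshake reduced to plain standalone C, with the barriers elided and noted in comments:

#include <stdbool.h>
#include <stdint.h>

#define COWN		(1u << 31)	/* MTK_STAR_DESC_BIT_COWN */
#define LEN_MASK	0xffffu		/* MTK_STAR_DESC_MSK_LEN */

struct example_desc { uint32_t status; uint32_t data_ptr; };

/* Consumer side: only touch descriptors the hardware handed back. */
static bool example_pop_ready(const struct example_desc *d)
{
	return d->status & COWN;	/* real driver: dma_rmb() after this */
}

/* Producer side: publish the descriptor by clearing COWN last. */
static void example_push_publish(struct example_desc *d, uint32_t addr,
				 uint32_t len)
{
	d->data_ptr = addr;
	d->status = (d->status & ~LEN_MASK) | len;
	/* real driver: dma_wmb() here, before the ownership flip */
	d->status &= ~COWN;
}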
+
+static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
+ return dma_map_single(dev, skb_tail_pointer(skb) - 2,
+ skb_tailroom(skb), DMA_FROM_DEVICE);
+}
+
+static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ dma_unmap_single(dev, desc_data->dma_addr,
+ skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
+}
+
+static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ return dma_unmap_single(dev, desc_data->dma_addr,
+ skb_headlen(desc_data->skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_NIC_PD);
+}
+
+/* Unmask the three interrupts we care about, mask all others. */
+static void mtk_star_intr_enable(struct mtk_star_priv *priv)
+{
+ unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
+ MTK_STAR_BIT_INT_STS_FNRC |
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
+}
+
+static void mtk_star_intr_disable(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
+}
+
+static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_TNTC);
+}
+
+static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_FNRC);
+}
+
+static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH);
+}
+
+static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_TNTC);
+}
+
+static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_BIT_INT_STS_FNRC);
+}
+
+static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH);
+}
+
+static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
+
+ return val;
+}
+
+static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ val = mtk_star_intr_read(priv);
+ regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
+
+ return val;
+}
+
+static void mtk_star_dma_init(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring_desc *desc;
+ unsigned int val;
+ int i;
+
+ priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
+
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
+ desc = &priv->descs_base[i];
+
+ memset(desc, 0, sizeof(*desc));
+ desc->status = MTK_STAR_DESC_BIT_COWN;
+ if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
+ (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
+ desc->status |= MTK_STAR_DESC_BIT_EOR;
+ }
+
+ mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
+ mtk_star_ring_init(&priv->rx_ring,
+ priv->descs_base + MTK_STAR_NUM_TX_DESCS);
+
+ /* Set DMA pointers. */
+ val = (unsigned int)priv->dma_addr;
+ regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
+
+ val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
+ regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
+}
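Editor's aside: both rings live in one coherent allocation, TX descriptors first, with the EOR bit marking the last entry of each ring so the DMA engine wraps. A worked example of the base-address arithmetic above, assuming a hypothetical DMA base of 0x40000000:

#include <stdio.h>

int main(void)
{
	const unsigned int desc_sz = 16; /* sizeof(struct mtk_star_ring_desc) */
	const unsigned int num_tx = 128; /* MTK_STAR_NUM_TX_DESCS */
	unsigned long long base = 0x40000000ULL; /* hypothetical dma_addr */

	printf("TX_BASE_ADDR = %#llx\n", base);
	printf("RX_BASE_ADDR = %#llx\n",
	       base + (unsigned long long)desc_sz * num_tx);
	/* prints 0x40000000 and 0x40000800: the RX ring starts 2 KiB in */
	return 0;
}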
+
+static void mtk_star_dma_start(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_START);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_START);
+}
+
+static void mtk_star_dma_stop(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_STOP);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_STOP);
+}
+
+static void mtk_star_dma_disable(struct mtk_star_priv *priv)
+{
+ int i;
+
+ mtk_star_dma_stop(priv);
+
+ /* Take back all descriptors. */
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
+ priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
+}
+
+static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_set_mac_addr(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ u8 *mac_addr = ndev->dev_addr;
+ unsigned int high, low;
+
+ high = mac_addr[0] << 8 | mac_addr[1] << 0;
+ low = mac_addr[2] << 24 | mac_addr[3] << 16 |
+ mac_addr[4] << 8 | mac_addr[5];
+
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
+}
+
+static void mtk_star_reset_counters(struct mtk_star_priv *priv)
+{
+ static const unsigned int counter_regs[] = {
+ MTK_STAR_REG_C_RXOKPKT,
+ MTK_STAR_REG_C_RXOKBYTE,
+ MTK_STAR_REG_C_RXRUNT,
+ MTK_STAR_REG_C_RXLONG,
+ MTK_STAR_REG_C_RXDROP,
+ MTK_STAR_REG_C_RXCRC,
+ MTK_STAR_REG_C_RXARLDROP,
+ MTK_STAR_REG_C_RXVLANDROP,
+ MTK_STAR_REG_C_RXCSERR,
+ MTK_STAR_REG_C_RXPAUSE,
+ MTK_STAR_REG_C_TXOKPKT,
+ MTK_STAR_REG_C_TXOKBYTE,
+ MTK_STAR_REG_C_TXPAUSECOL,
+ MTK_STAR_REG_C_TXRTY,
+ MTK_STAR_REG_C_TXSKIP,
+ MTK_STAR_REG_C_TX_ARP,
+ MTK_STAR_REG_C_RX_RERR,
+ MTK_STAR_REG_C_RX_UNI,
+ MTK_STAR_REG_C_RX_MULTI,
+ MTK_STAR_REG_C_RX_BROAD,
+ MTK_STAR_REG_C_RX_ALIGNERR,
+ MTK_STAR_REG_C_TX_UNI,
+ MTK_STAR_REG_C_TX_MULTI,
+ MTK_STAR_REG_C_TX_BROAD,
+ MTK_STAR_REG_C_TX_TIMEOUT,
+ MTK_STAR_REG_C_TX_LATECOL,
+ MTK_STAR_REG_C_RX_LENGTHERR,
+ MTK_STAR_REG_C_RX_TWIST,
+ };
+
+ unsigned int i, val;
+
+ for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
+ regmap_read(priv->regs, counter_regs[i], &val);
+}
+
+static void mtk_star_update_stat(struct mtk_star_priv *priv,
+ unsigned int reg, u64 *stat)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, reg, &val);
+ *stat += val;
+}
+
+/* Try to get as many stats as possible from the internal registers instead
+ * of tracking them ourselves.
+ */
+static void mtk_star_update_stats(struct mtk_star_priv *priv)
+{
+ struct rtnl_link_stats64 *stats = &priv->stats;
+
+ /* OK packets and bytes. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);
+
+ /* RX & TX multicast. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);
+
+ /* Collisions. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);
+
+ /* RX Errors. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
+ &stats->rx_length_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
+ &stats->rx_over_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
+ &stats->rx_frame_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
+ &stats->rx_fifo_errors);
+ /* Sum of the general RX error counter + all of the above. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
+ stats->rx_errors += stats->rx_length_errors;
+ stats->rx_errors += stats->rx_over_errors;
+ stats->rx_errors += stats->rx_crc_errors;
+ stats->rx_errors += stats->rx_frame_errors;
+ stats->rx_errors += stats->rx_fifo_errors;
+}
+
+/* This runs in process context; parallel TX and RX paths executing in
+ * NAPI context may cause some stats updates to be lost, but this should
+ * happen rarely enough to be acceptable.
+ */
+static void mtk_star_update_stats_work(struct work_struct *work)
+{
+ struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv,
+ stats_work);
+
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
+ mtk_star_intr_enable_stats(priv);
+}
+
+static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
+{
+ uintptr_t tail, offset;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
+ if (!skb)
+ return NULL;
+
+ /* Align to 16 bytes. */
+ tail = (uintptr_t)skb_tail_pointer(skb);
+ if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
+ offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
+ skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
+ }
+
+ /* Reserve two more bytes: eth_type_trans() will pull the 14-byte
+ * Ethernet header, after which the payload is 16-byte aligned again.
+ */
+ skb_reserve(skb, MTK_STAR_IP_ALIGN);
+
+ return skb;
+}
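Editor's aside: the allocator first rounds the tail pointer up to a 16-byte boundary, then reserves MTK_STAR_IP_ALIGN (2) more bytes so that once eth_type_trans() pulls the 14-byte Ethernet header the payload is 16-byte aligned again. The same arithmetic on a bare pointer value, as a standalone check:

#include <assert.h>
#include <stdint.h>

#define ALIGNMENT	16u	/* MTK_STAR_SKB_ALIGNMENT */
#define IP_ALIGN	2u	/* MTK_STAR_IP_ALIGN */
#define ETH_HDR_LEN	14u	/* pulled later by eth_type_trans() */

/* Mirror of the two skb_reserve() steps above. */
static uintptr_t example_align(uintptr_t tail)
{
	if (tail & (ALIGNMENT - 1))
		tail += ALIGNMENT - (tail & (ALIGNMENT - 1));
	return tail + IP_ALIGN;
}

int main(void)
{
	uintptr_t p = example_align(0x1003);

	/* 0x1003 -> 0x1010 (16-byte aligned) -> 0x1012; pulling the
	 * 14-byte Ethernet header lands the payload at 0x1020.
	 */
	assert(((p + ETH_HDR_LEN) & (ALIGNMENT - 1)) == 0);
	return 0;
}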
+
+static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
+ skb = mtk_star_alloc_skb(ndev);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = mtk_star_dma_map_rx(priv, skb);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ desc = &ring->descs[i];
+ desc->data_ptr = dma_addr;
+ desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
+ desc->status &= ~MTK_STAR_DESC_BIT_COWN;
+ ring->skbs[i] = skb;
+ ring->dma_addrs[i] = dma_addr;
+ }
+
+ return 0;
+}
+
+static void
+mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
+ void (*unmap_func)(struct mtk_star_priv *,
+ struct mtk_star_ring_desc_data *))
+{
+ struct mtk_star_ring_desc_data desc_data;
+ int i;
+
+ for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
+ if (!ring->dma_addrs[i])
+ continue;
+
+ desc_data.dma_addr = ring->dma_addrs[i];
+ desc_data.skb = ring->skbs[i];
+
+ unmap_func(priv, &desc_data);
+ dev_kfree_skb(desc_data.skb);
+ }
+}
+
+static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
+}
+
+static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
+}
+
+/* All processing for TX and RX happens in the napi poll callback. */
+static irqreturn_t mtk_star_handle_irq(int irq, void *data)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ bool need_napi = false;
+ unsigned int status;
+
+ ndev = data;
+ priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ status = mtk_star_intr_read(priv);
+
+ if (status & MTK_STAR_BIT_INT_STS_TNTC) {
+ mtk_star_intr_disable_tx(priv);
+ need_napi = true;
+ }
+
+ if (status & MTK_STAR_BIT_INT_STS_FNRC) {
+ mtk_star_intr_disable_rx(priv);
+ need_napi = true;
+ }
+
+ if (need_napi)
+ napi_schedule(&priv->napi);
+
+ /* One of the counters reached 0x8000000 - update stats and
+ * reset all counters.
+ */
+ if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
+ mtk_star_intr_disable_stats(priv);
+ schedule_work(&priv->stats_work);
+ }
+
+ mtk_star_intr_ack_all(priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for the completion of any previous command - CMD_START bit must be
+ * cleared by hardware.
+ */
+static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
+static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ /* Wait for BIST_DONE bit. */
+ ret = regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Check the BIST_OK bit. */
+ if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_OK))
+ return -EIO;
+
+ return 0;
+}
+
+static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
+ unsigned int hash_addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
+ val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
+ val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
+ val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
+ val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
+ regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
+{
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_EN);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
+ MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static void mtk_star_phy_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ if (priv->speed == SPEED_1000)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
+ else if (priv->speed == SPEED_100)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
+ else
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
+ val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
+
+ val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ /* Only full-duplex supported for now. */
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+
+ if (priv->pause) {
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ } else {
+ val = 0;
+ }
+
+ regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
+ MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
+ MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
+
+ if (priv->pause) {
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
+ } else {
+ val = 0;
+ }
+
+ regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
+ MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
+}
+
+static void mtk_star_adjust_link(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (!priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+
+ if (priv->speed != phydev->speed) {
+ priv->speed = phydev->speed;
+ new_state = true;
+ }
+
+ if (priv->pause != phydev->pause) {
+ priv->pause = phydev->pause;
+ new_state = true;
+ }
+ } else {
+ if (priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+ }
+
+ if (new_state) {
+ if (phydev->link)
+ mtk_star_phy_config(priv);
+
+ phy_print_status(ndev->phydev);
+ }
+}
+
+static void mtk_star_init_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
+ MTK_STAR_BIT_EXT_MDC_MODE |
+ MTK_STAR_BIT_SWC_MII_MODE);
+
+ regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
+ regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
+ MTK_STAR_MSK_MAC_CLK_CONF,
+ MTK_STAR_BIT_CLK_DIV_10);
+}
+
+static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
+{
+ regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
+ MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
+ MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+}
+
+static int mtk_star_enable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int val;
+ int ret;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_stop(priv);
+
+ mtk_star_set_mac_addr(ndev);
+
+ /* Configure the MAC */
+ val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
+ val <<= MTK_STAR_OFF_MAC_CFG_IPG;
+ val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
+ val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
+ val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
+ regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
+
+ /* Enable Hash Table BIST and reset it */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ return ret;
+
+ /* Setup the hashing algorithm */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_HASH_ALG |
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+
+ /* Don't strip VLAN tags */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
+
+ /* Setup DMA */
+ mtk_star_dma_init(priv);
+
+ ret = mtk_star_prepare_rx_skbs(ndev);
+ if (ret)
+ goto err_out;
+
+ /* Request the interrupt */
+ ret = request_irq(ndev->irq, mtk_star_handle_irq,
+ IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ if (ret)
+ goto err_free_skbs;
+
+ napi_enable(&priv->napi);
+
+ mtk_star_intr_ack_all(priv);
+ mtk_star_intr_enable(priv);
+
+ /* Connect to and start PHY */
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ mtk_star_adjust_link, 0, priv->phy_intf);
+ if (!priv->phydev) {
+ netdev_err(ndev, "failed to connect to PHY\n");
+ goto err_free_irq;
+ }
+
+ mtk_star_dma_start(priv);
+ phy_start(priv->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_free_irq:
+ free_irq(ndev->irq, ndev);
+err_free_skbs:
+ mtk_star_free_rx_skbs(priv);
+err_out:
+ return ret;
+}
+
+static void mtk_star_disable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_disable(priv);
+ mtk_star_intr_ack_all(priv);
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ free_irq(ndev->irq, ndev);
+ mtk_star_free_rx_skbs(priv);
+ mtk_star_free_tx_skbs(priv);
+}
+
+static int mtk_star_netdev_open(struct net_device *ndev)
+{
+ return mtk_star_enable(ndev);
+}
+
+static int mtk_star_netdev_stop(struct net_device *ndev)
+{
+ mtk_star_disable(ndev);
+
+ return 0;
+}
+
+static int mtk_star_netdev_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
+}
+
+static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+
+ desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
+ if (dma_mapping_error(dev, desc_data.dma_addr))
+ goto err_drop_packet;
+
+ desc_data.skb = skb;
+ desc_data.len = skb->len;
+
+ spin_lock_bh(&priv->lock);
+
+ mtk_star_ring_push_head_tx(ring, &desc_data);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ if (mtk_star_ring_full(ring))
+ netif_stop_queue(ndev);
+
+ spin_unlock_bh(&priv->lock);
+
+ mtk_star_dma_resume_tx(priv);
+
+ return NETDEV_TX_OK;
+
+err_drop_packet:
+ /* The skb is freed here, so report it as handled: NETDEV_TX_BUSY
+ * would make the core requeue an skb we no longer own.
+ */
+ dev_kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
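Editor's aside: the xmit path above reports queued bytes with netdev_sent_queue(); its counterpart netdev_completed_queue() appears in mtk_star_tx_complete_all() below. That pairing is the Byte Queue Limits (BQL) contract: every byte accounted on transmit must be reported back on completion, or the stack eventually stops the queue for good. A minimal sketch with illustrative function names:

#include <linux/netdevice.h>

/* Xmit side: account every byte handed to the hardware... */
static void example_xmit_side(struct net_device *ndev, unsigned int len)
{
	netdev_sent_queue(ndev, len);
}

/* ...and completion side: report the same bytes back once sent. */
static void example_completion_side(struct net_device *ndev,
				    unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(ndev, pkts, bytes);
}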
+
+/* Returns the number of bytes sent or a negative number on the first
+ * descriptor owned by DMA.
+ */
+static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct mtk_star_ring_desc_data desc_data;
+ int ret;
+
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return ret;
+
+ mtk_star_dma_unmap_tx(priv, &desc_data);
+ ret = desc_data.skb->len;
+ dev_kfree_skb_irq(desc_data.skb);
+
+ return ret;
+}
+
+static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct net_device *ndev = priv->ndev;
+ int ret, pkts_compl, bytes_compl;
+ bool wake = false;
+
+ spin_lock(&priv->lock);
+
+ for (pkts_compl = 0, bytes_compl = 0;;
+ pkts_compl++, bytes_compl += ret, wake = true) {
+ if (!mtk_star_ring_descs_available(ring))
+ break;
+
+ ret = mtk_star_tx_complete_one(priv);
+ if (ret < 0)
+ break;
+ }
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (wake && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ mtk_star_intr_enable_tx(priv);
+
+ spin_unlock(&priv->lock);
+}
+
+static void mtk_star_netdev_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ mtk_star_update_stats(priv);
+
+ memcpy(stats, &priv->stats, sizeof(*stats));
+}
+
+static void mtk_star_set_rx_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_addr;
+ unsigned int hash_addr, i;
+ int ret;
+
+ if (ndev->flags & IFF_PROMISC) {
+ regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+ } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
+ ndev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
+ ret = mtk_star_set_hashbit(priv, i);
+ if (ret)
+ goto hash_fail;
+ }
+ } else {
+ /* Clear previous settings. */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ goto hash_fail;
+
+ netdev_for_each_mc_addr(hw_addr, ndev) {
+ hash_addr = (hw_addr->addr[0] & 0x01) << 8;
+ hash_addr += hw_addr->addr[5];
+ ret = mtk_star_set_hashbit(priv, hash_addr);
+ if (ret)
+ goto hash_fail;
+ }
+ }
+
+ return;
+
+hash_fail:
+ if (ret == -ETIMEDOUT)
+ netdev_err(ndev, "setting hash bit timed out\n");
+ else
+ /* Should be -EIO */
+ netdev_err(ndev, "unable to set hash bit\n");
+}
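Editor's aside: the hash index computed above is 9 bits wide: the multicast bit of the first address octet supplies bit 8 and the last octet supplies the low byte, giving indices 0-511 to match MTK_STAR_HASHTABLE_SIZE_MAX. A worked example for the mDNS multicast address 01:00:5e:00:00:fb:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the hash computation above. */
static unsigned int example_hash_index(const uint8_t *mac)
{
	return ((mac[0] & 0x01) << 8) + mac[5];
}

int main(void)
{
	const uint8_t mdns[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	/* (0x01 & 0x01) << 8 = 256, plus 0xfb (251) = 507 */
	printf("hash bit %u\n", example_hash_index(mdns));
	return 0;
}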
+
+static const struct net_device_ops mtk_star_netdev_ops = {
+ .ndo_open = mtk_star_netdev_open,
+ .ndo_stop = mtk_star_netdev_stop,
+ .ndo_start_xmit = mtk_star_netdev_start_xmit,
+ .ndo_get_stats64 = mtk_star_netdev_get_stats64,
+ .ndo_set_rx_mode = mtk_star_set_rx_mode,
+ .ndo_do_ioctl = mtk_star_netdev_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void mtk_star_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+}
+
+/* TODO Add ethtool stats. */
+static const struct ethtool_ops mtk_star_ethtool_ops = {
+ .get_drvinfo = mtk_star_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *curr_skb, *new_skb;
+ dma_addr_t new_dma_addr;
+ int ret;
+
+ spin_lock(&priv->lock);
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ spin_unlock(&priv->lock);
+ if (ret)
+ return -1;
+
+ curr_skb = desc_data.skb;
+
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ /* Prepare new skb before receiving the current one. Reuse the current
+ * skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
+
+ desc_data.dma_addr = new_dma_addr;
+
+ /* We can't fail anymore at this point: it's safe to unmap the skb. */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
+
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
+
+push_new_skb:
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+
+ spin_lock(&priv->lock);
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
+{
+ int received, ret;
+
+ for (received = 0, ret = 0; received < budget && ret == 0; received++)
+ ret = mtk_star_receive_packet(priv);
+
+ mtk_star_dma_resume_rx(priv);
+
+ return received;
+}
+
+static int mtk_star_poll(struct napi_struct *napi, int budget)
+{
+ struct mtk_star_priv *priv;
+ int received = 0;
+
+ priv = container_of(napi, struct mtk_star_priv, napi);
+
+ /* Clean-up all TX descriptors. */
+ mtk_star_tx_complete_all(priv);
+ /* Receive up to $budget packets. */
+ received = mtk_star_process_rx(priv, budget);
+
+ if (received < budget) {
+ napi_complete_done(napi, received);
+ mtk_star_intr_enable_rx(priv);
+ }
+
+ return received;
+}
+
+static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ MTK_STAR_BIT_PHY_CTRL0_RWOK);
+}
+
+static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
+static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val, data;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
+ val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ ret = mtk_star_mdio_rwok_wait(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
+
+ data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+
+ return data;
+}
+
+static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
+ int regnum, u16 data)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = data;
+ val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+ val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
+ regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= regnum;
+ val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ return mtk_star_mdio_rwok_wait(priv);
+}
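Editor's aside: both MDIO ops funnel through PHY_CTRL0: the register number lands in bits 12:8, write data in bits 31:16, WTCMD or RDCMD starts the transfer, and RWOK signals completion. A worked example composing the word for "write 0x1234 to PHY register 2", as the mdio_write path above does:

#include <stdint.h>
#include <stdio.h>

#define OFF_PREG	8
#define MSK_PREG	(0x1fu << OFF_PREG)	/* GENMASK(12, 8) */
#define OFF_RWDATA	16
#define MSK_RWDATA	(0xffffu << OFF_RWDATA)	/* GENMASK(31, 16) */
#define BIT_WTCMD	(1u << 13)

int main(void)
{
	uint32_t val = ((uint32_t)0x1234 << OFF_RWDATA) & MSK_RWDATA;

	val |= (2u << OFF_PREG) & MSK_PREG;
	val |= BIT_WTCMD;
	printf("%#x\n", val);	/* 0x12342200 */
	return 0;
}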
+
+static int mtk_star_mdio_init(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ struct device_node *of_node, *mdio_node;
+ int ret;
+
+ of_node = dev->of_node;
+
+ mdio_node = of_get_child_by_name(of_node, "mdio");
+ if (!mdio_node)
+ return -ENODEV;
+
+ if (!of_device_is_available(mdio_node)) {
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ priv->mii = devm_mdiobus_alloc(dev);
+ if (!priv->mii) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ priv->mii->name = "mtk-mac-mdio";
+ priv->mii->parent = dev;
+ priv->mii->read = mtk_star_mdio_read;
+ priv->mii->write = mtk_star_mdio_write;
+ priv->mii->priv = priv;
+
+ ret = of_mdiobus_register(priv->mii, mdio_node);
+
+out_put_node:
+ of_node_put(mdio_node);
+ return ret;
+}
+
+static __maybe_unused int mtk_star_suspend(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ mtk_star_disable(ndev);
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_star_resume(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ if (netif_running(ndev)) {
+ ret = mtk_star_enable(ndev);
+ if (ret)
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+ }
+
+ return ret;
+}
+
+static void mtk_star_clk_disable_unprepare(void *data)
+{
+ struct mtk_star_priv *priv = data;
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+}
+
+static void mtk_star_mdiobus_unregister(void *data)
+{
+ struct mtk_star_priv *priv = data;
+
+ mdiobus_unregister(priv->mii);
+}
+
+static int mtk_star_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ struct device *dev;
+ void __iomem *base;
+ int ret, i;
+
+ dev = &pdev->dev;
+ of_node = dev->of_node;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, ndev);
+
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->stats_work, mtk_star_update_stats_work);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* We won't be checking the return values of regmap read & write
+ * functions. They can only fail for mmio if there's a clock attached
+ * to regmap which is not the case here.
+ */
+ priv->regs = devm_regmap_init_mmio(dev, base,
+ &mtk_star_regmap_config);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,pericfg");
+ if (IS_ERR(priv->pericfg)) {
+ dev_err(dev, "Failed to lookup the PERICFG syscon\n");
+ return PTR_ERR(priv->pericfg);
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0)
+ return ndev->irq;
+
+ for (i = 0; i < MTK_STAR_NCLKS; i++)
+ priv->clks[i].id = mtk_star_clk_names[i];
+ ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev,
+ mtk_star_clk_disable_unprepare, priv);
+ if (ret)
+ return ret;
+
+ ret = of_get_phy_mode(of_node, &priv->phy_intf);
+ if (ret) {
+ return ret;
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ dev_err(dev, "unsupported phy mode: %s\n",
+ phy_modes(priv->phy_intf));
+ return -EINVAL;
+ }
+
+ priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ dev_err(dev, "failed to retrieve the phy handle from device tree\n");
+ return -ENODEV;
+ }
+
+ mtk_star_set_mode_rmii(priv);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "unsupported DMA mask\n");
+ return ret;
+ }
+
+ priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
+ &priv->dma_addr,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->ring_base)
+ return -ENOMEM;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_init_config(priv);
+
+ ret = mtk_star_mdio_init(ndev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv);
+ if (ret)
+ return ret;
+
+ ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ if (ret || !is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+
+ ndev->netdev_ops = &mtk_star_netdev_ops;
+ ndev->ethtool_ops = &mtk_star_ethtool_ops;
+
+ netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
+
+ return devm_register_netdev(dev, ndev);
+}
+
+static const struct of_device_id mtk_star_of_match[] = {
+ { .compatible = "mediatek,mt8516-eth", },
+ { .compatible = "mediatek,mt8518-eth", },
+ { .compatible = "mediatek,mt8175-eth", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+
+static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
+ mtk_star_suspend, mtk_star_resume);
+
+static struct platform_driver mtk_star_driver = {
+ .driver = {
+ .name = MTK_STAR_DRVNAME,
+ .pm = &mtk_star_pm_ops,
+ .of_match_table = of_match_ptr(mtk_star_of_match),
+ },
+ .probe = mtk_star_probe,
+};
+module_platform_driver(mtk_star_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 73eae80e1cb7..ac5468b77488 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -197,6 +197,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
err = devlink_region_snapshot_id_get(devlink, &id);
if (err) {
mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
+ iounmap(cr_space);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8a5ea2543670..b816154bc79a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1235,7 +1235,6 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
u32 i, rss_rings;
- int err = 0;
rss_rings = priv->prof->rss_rings ?: n;
rss_rings = rounddown_pow_of_two(rss_rings);
@@ -1249,7 +1248,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
- return err;
+ return 0;
}
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
@@ -1393,7 +1392,6 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
struct mlx4_spec_list *spec_l2,
unsigned char *mac)
{
- int err = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
@@ -1408,7 +1406,7 @@ static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
list_add_tail(&spec_l2->list, rule_list_h);
- return err;
+ return 0;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 43dcbd8214c6..5bd3cd37d50f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -51,7 +51,8 @@
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
- XDP_PACKET_HEADROOM))
+ XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
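Editor's aside: the extra SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) term matches the frame_sz accounting introduced elsewhere in this series: the XDP buffer must leave tailroom for the shared_info when a frame is turned into an skb. A rough re-computation for 4 KiB pages; the shinfo size is arch-dependent and ~320 bytes on x86-64 is an assumption here:

#include <stdio.h>

int main(void)
{
	int page_size = 4096;	/* assuming 4 KiB pages */
	int eth_hlen = 14, vlan_hlen = 4;
	int xdp_headroom = 256;	/* XDP_PACKET_HEADROOM */
	int shinfo = 320;	/* ~SKB_DATA_ALIGN(sizeof(skb_shared_info)),
				 * arch-dependent assumption */
	int max_mtu = page_size - eth_hlen - 2 * vlan_hlen -
		      xdp_headroom - shinfo;

	printf("max XDP MTU ~ %d\n", max_mtu);	/* ~3498; ~3818 before */
	return 0;
}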
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db3552f2d087..8a10285b0e10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -683,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
+ xdp.frame_sz = priv->frag_info[0].frag_stride;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -946,7 +947,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
- budget);
+ budget) < budget;
xdp_tx_cq->xdp_busy = !clean_complete;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a30edb436f4a..9dff7b086c9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,8 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -415,7 +415,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 ring_cons;
if (unlikely(!priv->port_up))
- return true;
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -492,7 +492,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
- return done < budget;
+ return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -504,7 +504,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
ring->wake_queue++;
}
- return done < budget;
+ return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -524,14 +524,14 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- bool clean_complete;
+ int work_done;
- clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
- if (!clean_complete)
+ work_done = mlx4_en_process_tx_cq(dev, cq, budget);
+ if (work_done >= budget)
return budget;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, work_done))
+ mlx4_en_arm_cq(priv, cq);
return 0;
}
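Editor's aside: converting mlx4_en_process_tx_cq() from bool to a work-done count lets the poll function follow the standard NAPI contract: return budget to stay scheduled, otherwise complete with the real count and re-arm interrupts only if napi_complete_done() confirms the instance went idle. A minimal sketch of that contract, not the mlx4 implementation:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	/* A driver would clean its rings here, bounded by @budget. */
	int work_done = 0;

	if (work_done >= budget)
		return budget;	/* more work pending: stay scheduled */

	if (napi_complete_done(napi, work_done)) {
		/* Instance really went idle: re-arm device IRQs here,
		 * e.g. mlx4_en_arm_cq() in the hunk above.
		 */
	}

	return work_done;
}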
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 630f15977f09..9f5603612960 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -737,8 +737,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget);
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fd375cbe586e..b6ffd1622cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -78,9 +78,24 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_CLS_ACT
+ bool "MLX5 TC classifier action support"
+ depends on MLX5_ESWITCH && NET_CLS_ACT
+ default y
+ help
+ mlx5 ConnectX offload support for the TC classifier action (NET_CLS_ACT);
+ works in both native NIC mode and Switchdev SRIOV mode.
+ Actions get attached to hardware-offloaded classifiers and are
+ invoked after a successful classification. Actions are used to
+ overwrite the classification result, instantly drop or redirect and/or
+ reformat packets at wire speed without involving the host CPU.
+
+ If set to N, TC offloads in both NIC and switchdev modes will be disabled.
+ If unsure, set to Y.
+
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
default y
help
Say Y here if you want to support offloading connection tracking rules
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 6d32915000fc..b61e47bc16e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
+ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
@@ -33,17 +33,24 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
- lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
- en/tc_tun_geneve.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+ en_rep.o en/rep/bond.o
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
+ en/mapping.o esw/chains.o en/tc_tun.o \
+ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
+ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/chains.o
+ ecpf.o rdma.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
+ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
+ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
+
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
index c13260467750..82b185121edb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -5,7 +5,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include "en.h"
static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
index eddc34e4a762..8a4985d8cbfe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
@@ -58,12 +58,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
- return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
- spi, is_ipv6);
+ __be32 saddr[4] = {}, daddr[4] = {};
+
+ if (!xfrm->attrs.is_ipv6) {
+ saddr[3] = xfrm->attrs.saddr.a4;
+ daddr[3] = xfrm->attrs.daddr.a4;
+ } else {
+ memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
+ memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
+ }
+
+ return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
+ daddr, xfrm->attrs.spi,
+ xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(void *context)
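Moving the address marshalling into the helper keeps callers ignorant of the firmware layout: a 4 x __be32 slot where an IPv4 address occupies the last word. A hedged sketch of that packing (pack_addr() is illustrative, not a driver function; a4/a6 mirror the xfrm attrs shown above):

#include <linux/string.h>
#include <linux/types.h>

static void pack_addr(__be32 out[4], bool is_ipv6,
		      __be32 a4, const __be32 a6[4])
{
	memset(out, 0, 4 * sizeof(*out));
	if (is_ipv6)
		memcpy(out, a6, 4 * sizeof(*out));
	else
		out[3] = a4;	/* IPv4 goes in the low word */
}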
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
index 530e428d46ab..e89747674712 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
@@ -48,9 +48,7 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
@@ -64,9 +62,7 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4],
- const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ u32 *sa_handle)
{
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index cab708af3422..cbf3d76c05a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -56,8 +56,8 @@ void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn)
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn)
{
return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index e09bc3858d57..aefea467f7b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -109,8 +109,8 @@ int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -125,8 +125,8 @@ mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
- u32 seq, u64 rcd_sn) { return 0; }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
{
return mlx5_accel_is_ktls_device(mdev);
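Annotating the handle and record sequence number as __be32/__be64 documents that they travel to the device in network byte order and lets sparse flag unconverted assignments. A hedged illustration of the effect:

#include <linux/types.h>
#include <asm/byteorder.h>

static void resync_types_sketch(void)
{
	u64 host_rcd_sn = 42;
	__be64 wire_rcd_sn;

	wire_rcd_sn = cpu_to_be64(host_rcd_sn);	/* explicit conversion: ok */
	/* wire_rcd_sn = host_rcd_sn; - sparse would warn about mixed base types */
	(void)wire_rcd_sn;
}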
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 7a77fe40af3a..1d91a0d0ab1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1072,7 +1072,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ds = ent->ts2 - ent->ts1;
op = MLX5_GET(mbox_in, in->first.data, opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
+ if (op < MLX5_CMD_OP_MAX) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
@@ -1551,7 +1551,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
- if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ if (ent->op < MLX5_CMD_OP_MAX) {
stats = &cmd->stats[ent->op];
spin_lock_irqsave(&stats->lock, flags);
stats->sum += ds;
@@ -1936,6 +1936,11 @@ static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
cmd->alloc_dma);
}
+static u16 cmdif_rev(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
int size = sizeof(struct mlx5_cmd_prot_block);
@@ -1955,10 +1960,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
- if (!cmd->pool)
+ cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ if (!cmd->stats)
return -ENOMEM;
+ cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto dma_pool_err;
+ }
+
err = alloc_cmd_page(dev, cmd);
if (err)
goto err_free_pool;
@@ -1994,7 +2005,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
spin_lock_init(&cmd->alloc_lock);
spin_lock_init(&cmd->token_lock);
- for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+ for (i = 0; i < MLX5_CMD_OP_MAX; i++)
spin_lock_init(&cmd->stats[i].lock);
sema_init(&cmd->sem, cmd->max_reg_cmds);
@@ -2041,7 +2052,8 @@ err_free_page:
err_free_pool:
dma_pool_destroy(cmd->pool);
-
+dma_pool_err:
+ kvfree(cmd->stats);
return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
@@ -2055,6 +2067,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ kvfree(cmd->stats);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
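Because the stats array is now allocated with kvzalloc() (sized by MLX5_CMD_OP_MAX) rather than embedded, init gains one more unwind step and cleanup one more kvfree(). A hedged sketch of the goto-unwind idiom these hunks extend (struct ctx and the constants are illustrative):

#include <linux/dmapool.h>
#include <linux/mm.h>	/* kvzalloc/kvfree */

struct ctx {		/* illustrative container, not the driver's */
	void *stats;
	struct dma_pool *pool;
	struct device *dev;
};

static int ctx_init_sketch(struct ctx *c, size_t nstats, size_t sz)
{
	int err;

	c->stats = kvzalloc(nstats * sz, GFP_KERNEL);
	if (!c->stats)
		return -ENOMEM;

	c->pool = dma_pool_create("sketch", c->dev, 64, 64, 0);
	if (!c->pool) {
		err = -ENOMEM;
		goto err_stats;	/* release in reverse acquisition order */
	}
	return 0;

err_stats:
	kvfree(c->stats);
	return err;
}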
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 818edc63e428..8379b24cb838 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
@@ -91,8 +90,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
- u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+ u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
struct mlx5_eq_comp *eq;
int err;
@@ -142,20 +140,17 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
err_cq_add:
mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, din, uid, cq->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+ mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
- u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
@@ -164,7 +159,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, destroy_cq, in);
if (err)
return err;
@@ -179,20 +174,20 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {};
MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
MLX5_SET(query_cq_in, in, cqn, cq->cqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_cq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
MLX5_SET(modify_cq_in, in, uid, cq->uid);
@@ -205,7 +200,7 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
- u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
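The repeated pattern of declaring zeroed in/out arrays and passing their sizes is what the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() calls fold away: the buffer sizes are derived from the IFC command name. A hedged sketch of how such wrappers can be built (names prefixed cmd_ to avoid claiming the exact in-tree definition):

/* Sketch modeled on the mlx5 wrappers of this era; MLX5_ST_SZ_BYTES/DW
 * compute layout sizes from the command name, so callers cannot mis-size.
 */
#define cmd_exec_inout(dev, ifc_cmd, in, out)                          \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),         \
		      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

#define cmd_exec_in(dev, ifc_cmd, in)                                  \
	({                                                             \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};           \
		cmd_exec_inout(dev, ifc_cmd, in, _out);                \
	})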
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 04854e5fbcd7..07c8d9811bc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -101,15 +101,15 @@ void mlx5_unregister_debugfs(void)
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- atomic_set(&dev->num_qps, 0);
-
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
+EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
@@ -171,7 +171,7 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
- for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
+ for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
@@ -203,41 +203,41 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *ctx;
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
u64 param = 0;
u32 *out;
+ int state;
+ u32 *qpc;
int err;
- int no_sq;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
- return param;
+ return 0;
- err = mlx5_core_qp_query(dev, qp, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
+ if (err)
goto out;
- }
*is_str = 0;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
-
+ qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
- param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+ state = MLX5_GET(qpc, qpc, state);
+ param = (unsigned long)mlx5_qp_state_str(state);
*is_str = 1;
break;
case QP_XPORT:
- param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+ param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
*is_str = 1;
break;
case QP_MTU:
- switch (ctx->mtu_msgmax >> 5) {
+ switch (MLX5_GET(qpc, qpc, mtu)) {
case IB_MTU_256:
param = 256;
break;
@@ -258,46 +258,32 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
break;
case QP_N_RECV:
- param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
+ param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
break;
case QP_RECV_SZ:
- param = 1 << ((ctx->rq_size_stride & 7) + 4);
+ param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
break;
case QP_N_SEND:
- no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
- if (!no_sq)
- param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
- else
- param = 0;
+ if (!MLX5_GET(qpc, qpc, no_sq))
+ param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
break;
case QP_LOG_PG_SZ:
- param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
- param += 12;
+ param = MLX5_GET(qpc, qpc, log_page_size) + 12;
break;
case QP_RQPN:
- param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
+ param = MLX5_GET(qpc, qpc, remote_qpn);
break;
}
-
out:
kfree(out);
return param;
}
-static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
-
- MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
- MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
u64 param = 0;
void *ctx;
u32 *out;
@@ -307,7 +293,9 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
if (!out)
return param;
- err = mlx5_core_eq_query(dev, eq, out, outlen);
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
@@ -344,7 +332,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (!out)
return param;
- err = mlx5_core_query_cq(dev, cq, out, outlen);
+ err = mlx5_core_query_cq(dev, cq, out);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
@@ -461,6 +449,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
return err;
}
+EXPORT_SYMBOL(mlx5_debug_qp_add);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
@@ -470,6 +459,7 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (qp->dbg)
rem_res_tree(qp->dbg);
}
+EXPORT_SYMBOL(mlx5_debug_qp_remove);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
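The qp_read_field() rewrite above replaces hand-decoded struct mlx5_qp_context bitfields with MLX5_GET accessors generated from the device-interface (IFC) layout. A minimal, hedged contrast (read_qp_state_sketch() is illustrative):

static int read_qp_state_sketch(void *qpc)
{
	/* Generated accessor: offset and width come from the IFC
	 * description, replacing open-coded decoding such as
	 *	be32_to_cpu(ctx->flags) >> 28
	 * which silently breaks if the firmware layout shifts.
	 */
	return MLX5_GET(qpc, qpc, state);
}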
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 8ecac81a385d..a700f3c86899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -76,58 +76,59 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
.v = MLX5_GET(fte_match_set_lyr_2_4, value, dmac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, value, dmac_15_0)};
MASK_VAL_L2(u16, ethertype, ethertype);
+ MASK_VAL_L2(u8, ip_version, ip_version);
PRINT_MASKED_VALP(smac, u8 *, p, "%pM");
PRINT_MASKED_VALP(dmac, u8 *, p, "%pM");
PRINT_MASKED_VAL(ethertype, p, "%04x");
- if (ethertype.m == 0xffff) {
- if (ethertype.v == ETH_P_IP) {
+ if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IP) ||
+ (ip_version.m == 0xf && ip_version.v == 4)) {
#define MASK_VAL_L2_BE(type, name, fld) \
MASK_VAL_BE(type, fte_match_set_lyr_2_4, name, mask, value, fld)
- MASK_VAL_L2_BE(u32, src_ipv4,
- src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MASK_VAL_L2_BE(u32, dst_ipv4,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ MASK_VAL_L2_BE(u32, src_ipv4,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MASK_VAL_L2_BE(u32, dst_ipv4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
- "%pI4");
- PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
- "%pI4");
- } else if (ethertype.v == ETH_P_IPV6) {
- static const struct in6_addr full_ones = {
- .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff),
- __constant_htonl(0xffffffff)},
- };
- DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
- DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
+ PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
+ "%pI4");
+ PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
+ "%pI4");
+ } else if ((ethertype.m == 0xffff && ethertype.v == ETH_P_IPV6) ||
+ (ip_version.m == 0xf && ip_version.v == 6)) {
+ static const struct in6_addr full_ones = {
+ .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff)},
+ };
+ DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
+ DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
- memcpy(src_ipv6.m.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(src_ipv6.m));
- memcpy(dst_ipv6.m.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(dst_ipv6.m));
- memcpy(src_ipv6.v.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(src_ipv6.v));
- memcpy(dst_ipv6.v.in6_u.u6_addr8,
- MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(dst_ipv6.v));
+ memcpy(src_ipv6.m.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(src_ipv6.m));
+ memcpy(dst_ipv6.m.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(dst_ipv6.m));
+ memcpy(src_ipv6.v.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(src_ipv6.v));
+ memcpy(dst_ipv6.v.in6_u.u6_addr8,
+ MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(dst_ipv6.v));
- if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones)))
- trace_seq_printf(p, "src_ipv6=%pI6 ",
- &src_ipv6.v);
- if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones)))
- trace_seq_printf(p, "dst_ipv6=%pI6 ",
- &dst_ipv6.v);
- }
+ if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones)))
+ trace_seq_printf(p, "src_ipv6=%pI6 ",
+ &src_ipv6.v);
+ if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones)))
+ trace_seq_printf(p, "dst_ipv6=%pI6 ",
+ &dst_ipv6.v);
}
#define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 5ce6ebbc7f10..a7551274be58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -684,7 +684,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]);
while (block_timestamp > tracer->last_timestamp) {
- /* Check block override if its not the first block */
+ /* Check block override if it's not the first block */
if (!tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override by the HW in case of buffer
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index d2228e37450f..a894ea98c95a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -8,33 +8,13 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
-static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
-
- MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
- MLX5_SET(enable_hca_in, in, function_id, 0);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-}
-
-static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
-
- MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
- MLX5_SET(disable_hca_in, in, function_id, 0);
- MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_enable_hca(dev);
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, enable_hca, in);
if (err)
mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
err);
@@ -44,9 +24,11 @@ static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
{
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
int err;
- err = mlx5_peer_pf_disable_hca(dev);
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ err = mlx5_cmd_exec_in(dev, disable_hca, in);
if (err) {
mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0a5aada0f50f..842db20493df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -36,7 +36,6 @@
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
@@ -53,6 +52,7 @@
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
+#include "en/dcbnl.h"
#include "en/fs.h"
#include "lib/hv_vhca.h"
@@ -69,8 +69,6 @@ struct page_pool;
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
-#define MLX5E_MAX_PRIORITY 8
-#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5_RX_HEADROOM NET_SKB_PAD
@@ -243,10 +241,6 @@ enum mlx5e_priv_flag {
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#endif
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -271,42 +265,6 @@ struct mlx5e_params {
int hard_mtu;
};
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-struct mlx5e_cee_config {
- /* bw pct for priority group */
- u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
- u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
- bool pfc_setting[CEE_DCBX_MAX_PRIO];
- bool pfc_enable;
-};
-
-enum {
- MLX5_DCB_CHG_RESET,
- MLX5_DCB_NO_CHG,
- MLX5_DCB_CHG_NO_RESET,
-};
-
-struct mlx5e_dcbx {
- enum mlx5_dcbx_oper_mode mode;
- struct mlx5e_cee_config cee_cfg; /* pending configuration */
- u8 dscp_app_cnt;
-
- /* The only setting that cannot be read from FW */
- u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
- u8 cap;
-
- /* Buffer configuration */
- bool manual_buffer;
- u32 cable_len;
- u32 xoff;
-};
-
-struct mlx5e_dcbx_dp {
- u8 dscp2prio[MLX5E_MAX_DSCP];
- u8 trust_state;
-};
-#endif
-
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_RECOVERING,
@@ -339,16 +297,6 @@ struct mlx5e_cq_decomp {
u16 wqe_counter;
} ____cacheline_aligned_in_smp;
-struct mlx5e_tx_wqe_info {
- struct sk_buff *skb;
- u32 num_bytes;
- u8 num_wqebbs;
- u8 num_dma;
-#ifdef CONFIG_MLX5_EN_TLS
- struct page *resync_dump_frag_page;
-#endif
-};
-
enum mlx5e_dma_map_type {
MLX5E_DMA_MAP_SINGLE,
MLX5E_DMA_MAP_PAGE
@@ -370,18 +318,6 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
-struct mlx5e_sq_wqe_info {
- u8 opcode;
- u8 num_wqebbs;
-
- /* Auxiliary data for different opcodes. */
- union {
- struct {
- struct mlx5e_rq *rq;
- } umr;
- };
-};
-
struct mlx5e_txqsq {
/* data path */
@@ -429,10 +365,7 @@ struct mlx5e_dma_info {
dma_addr_t addr;
union {
struct page *page;
- struct {
- u64 handle;
- void *data;
- } xsk;
+ struct xdp_buff *xsk;
};
};
@@ -484,11 +417,6 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_wqe_info {
- u8 num_wqebbs;
- u8 num_pkts;
-};
-
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
@@ -552,7 +480,7 @@ struct mlx5e_icosq {
/* write@xmit, read@completion */
struct {
- struct mlx5e_sq_wqe_info *ico_wqe;
+ struct mlx5e_icosq_wqe_info *wqe_info;
} db;
/* read only */
@@ -650,8 +578,8 @@ struct mlx5e_rq {
} mpwqe;
};
struct {
- u16 umem_headroom;
u16 headroom;
+ u32 frame0_sz;
u8 map_dir; /* dma map direction */
} buff;
@@ -682,7 +610,6 @@ struct mlx5e_rq {
struct page_pool *page_pool;
/* AF_XDP zero-copy */
- struct zero_copy_allocator zca;
struct xdp_umem *umem;
struct work_struct recover_work;
@@ -919,8 +846,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
@@ -1013,7 +940,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner);
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in);
struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
struct mlx5e_xsk_param;
@@ -1097,21 +1024,15 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
-void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
-void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
-#endif
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 *in);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ bool enable_mc_lb);
/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
new file mode 100644
index 000000000000..7be6b2d36b60
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5E_DCBNL_H__
+#define __MLX5E_DCBNL_H__
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+
+#define MLX5E_MAX_PRIORITY (8)
+
+struct mlx5e_cee_config {
+ /* bw pct for priority group */
+ u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
+ u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
+ bool pfc_setting[CEE_DCBX_MAX_PRIO];
+ bool pfc_enable;
+};
+
+struct mlx5e_dcbx {
+ enum mlx5_dcbx_oper_mode mode;
+ struct mlx5e_cee_config cee_cfg; /* pending configuration */
+ u8 dscp_app_cnt;
+
+ /* The only setting that cannot be read from FW */
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 cap;
+
+ /* Buffer configuration */
+ bool manual_buffer;
+ u32 cable_len;
+ u32 xoff;
+};
+
+#define MLX5E_MAX_DSCP (64)
+
+struct mlx5e_dcbx_dp {
+ u8 dscp2prio[MLX5E_MAX_DSCP];
+ u8 trust_state;
+};
+
+void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
+void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
+#else
+static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
+static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
+static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
+static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
+static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
+#endif
+
+#endif /* __MLX5E_DCBNL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3a199a03d929..7283443868f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -43,7 +43,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
void *cqc;
int err;
- err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 7cd5b02e0f10..8fe8b4d6ad1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -38,12 +38,11 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
@@ -66,19 +65,6 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
-static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
-{
- MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
- MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-}
-
-static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
-{
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
- cancel_work_sync(&priv->monitor_counters_work);
-}
-
static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
{
enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
@@ -118,8 +104,7 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
int q_counter = priv->q_counter;
int cnt = 0;
@@ -136,34 +121,31 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, set_monitor_counter, in);
}
/* check if mlx5e_monitor_counter_supported before calling this function*/
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
- mlx5e_monitor_counter_start(priv);
+ MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
+ MONITOR_COUNTER);
+ mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
+
mlx5e_set_monitor_counter(priv);
mlx5e_monitor_counter_arm(priv);
queue_work(priv->wq, &priv->update_stats_work);
}
-static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
+/* check if mlx5e_monitor_counter_supported before calling this function */
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
- u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
-}
-
-/* check if mlx5e_monitor_counter_supported before calling this function*/
-void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
-{
- mlx5e_monitor_counter_disable(priv);
- mlx5e_monitor_counter_stop(priv);
+ mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ cancel_work_sync(&priv->monitor_counters_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index eb2e1f2138e4..38e4f19d69f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -12,15 +12,16 @@ static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- u16 headroom = NET_IP_ALIGN;
+ u16 headroom;
- if (mlx5e_rx_is_xdp(params, xsk)) {
+ if (xsk)
+ return xsk->headroom;
+
+ headroom = NET_IP_ALIGN;
+ if (mlx5e_rx_is_xdp(params, xsk))
headroom += XDP_PACKET_HEADROOM;
- if (xsk)
- headroom += xsk->headroom;
- } else {
+ else
headroom += MLX5_RX_HEADROOM;
- }
return headroom;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
new file mode 100644
index 000000000000..bdb71332cbf2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <net/lag.h>
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "esw/acl/ofld.h"
+#include "en_rep.h"
+
+struct mlx5e_rep_bond {
+ struct notifier_block nb;
+ struct netdev_net_notifier nn;
+ struct list_head metadata_list;
+};
+
+struct mlx5e_rep_bond_slave_entry {
+ struct list_head list;
+ struct net_device *netdev;
+};
+
+struct mlx5e_rep_bond_metadata {
+ struct list_head list; /* link to global list of rep_bond_metadata */
+ struct mlx5_eswitch *esw;
+ /* uplink's private data, holding the rep bond metadata list */
+ struct net_device *lag_dev;
+ u32 metadata_reg_c_0;
+
+ struct list_head slaves_list; /* slaves list */
+ int slaves;
+};
+
+static struct mlx5e_rep_bond_metadata *
+mlx5e_lookup_rep_bond_metadata(struct mlx5_rep_uplink_priv *uplink_priv,
+ const struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_metadata *found = NULL;
+ struct mlx5e_rep_bond_metadata *cur;
+
+ list_for_each_entry(cur, &uplink_priv->bond->metadata_list, list) {
+ if (cur->lag_dev == lag_dev) {
+ found = cur;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static struct mlx5e_rep_bond_slave_entry *
+mlx5e_lookup_rep_bond_slave_entry(struct mlx5e_rep_bond_metadata *mdata,
+ const struct net_device *netdev)
+{
+ struct mlx5e_rep_bond_slave_entry *found = NULL;
+ struct mlx5e_rep_bond_slave_entry *cur;
+
+ list_for_each_entry(cur, &mdata->slaves_list, list) {
+ if (cur->netdev == netdev) {
+ found = cur;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void mlx5e_rep_bond_metadata_release(struct mlx5e_rep_bond_metadata *mdata)
+{
+ netdev_dbg(mdata->lag_dev, "destroy rep_bond_metadata(%d)\n",
+ mdata->metadata_reg_c_0);
+ list_del(&mdata->list);
+ mlx5_esw_match_metadata_free(mdata->esw, mdata->metadata_reg_c_0);
+ WARN_ON(!list_empty(&mdata->slaves_list));
+ kfree(mdata);
+}
+
+/* This must be called under rtnl_lock */
+int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
+ struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_slave_entry *s_entry;
+ struct mlx5e_rep_bond_metadata *mdata;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+ int err;
+
+ ASSERT_RTNL();
+
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev);
+ if (!mdata) {
+ /* First netdev becomes a slave; no metadata exists for this lag_dev yet. Create one */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
+
+ mdata->lag_dev = lag_dev;
+ mdata->esw = esw;
+ INIT_LIST_HEAD(&mdata->slaves_list);
+ mdata->metadata_reg_c_0 = mlx5_esw_match_metadata_alloc(esw);
+ if (!mdata->metadata_reg_c_0) {
+ kfree(mdata);
+ return -ENOSPC;
+ }
+ list_add(&mdata->list, &rpriv->uplink_priv.bond->metadata_list);
+
+ netdev_dbg(lag_dev, "create rep_bond_metadata(%d)\n",
+ mdata->metadata_reg_c_0);
+ }
+
+ s_entry = kzalloc(sizeof(*s_entry), GFP_KERNEL);
+ if (!s_entry) {
+ err = -ENOMEM;
+ goto entry_alloc_err;
+ }
+
+ s_entry->netdev = netdev;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
+ err = mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport,
+ mdata->metadata_reg_c_0);
+ if (err)
+ goto ingress_err;
+
+ mdata->slaves++;
+ list_add_tail(&s_entry->list, &mdata->slaves_list);
+ netdev_dbg(netdev, "enslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n",
+ rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0);
+
+ return 0;
+
+ingress_err:
+ kfree(s_entry);
+entry_alloc_err:
+ if (!mdata->slaves)
+ mlx5e_rep_bond_metadata_release(mdata);
+ return err;
+}
+
+/* This must be called under rtnl_lock */
+void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+ const struct net_device *netdev,
+ const struct net_device *lag_dev)
+{
+ struct mlx5e_rep_bond_slave_entry *s_entry;
+ struct mlx5e_rep_bond_metadata *mdata;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
+
+ ASSERT_RTNL();
+
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ mdata = mlx5e_lookup_rep_bond_metadata(&rpriv->uplink_priv, lag_dev);
+ if (!mdata)
+ return;
+
+ s_entry = mlx5e_lookup_rep_bond_slave_entry(mdata, netdev);
+ if (!s_entry)
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
+ /* Reset bond_metadata to zero first, then reset all ingress/egress
+ * ACLs and rx rules of the unslaved representor's vport
+ */
+ mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, 0);
+ mlx5_esw_acl_egress_vport_unbond(esw, rpriv->rep->vport);
+ mlx5e_rep_bond_update(priv, false);
+
+ list_del(&s_entry->list);
+
+ netdev_dbg(netdev, "unslave rep vport(%d) lag_dev(%s) metadata(0x%x)\n",
+ rpriv->rep->vport, lag_dev->name, mdata->metadata_reg_c_0);
+
+ if (--mdata->slaves == 0)
+ mlx5e_rep_bond_metadata_release(mdata);
+ kfree(s_entry);
+}
+
+static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ /* Bail out if the netdev is not a representor or not a slave in a LAG configuration */
+ if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+ return false;
+
+ /* Egress ACL forward-to-vport is supported only for non-uplink representors */
+ return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+}
+
+static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info;
+ struct netdev_lag_lower_state_info *lag_info;
+ struct mlx5e_rep_priv *rpriv;
+ struct net_device *lag_dev;
+ struct mlx5e_priv *priv;
+ struct list_head *iter;
+ struct net_device *dev;
+ u16 acl_vport_num;
+ u16 fwd_vport_num;
+ int err;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return;
+
+ info = ptr;
+ lag_info = info->lower_state_info;
+ /* This is not an event of a representor becoming an active slave */
+ if (!lag_info->tx_enabled)
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+ fwd_vport_num = rpriv->rep->vport;
+ lag_dev = netdev_master_upper_dev_get(netdev);
+
+ netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
+ lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
+
+ /* Point everyone's egress acl to the vport of the active representor */
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ acl_vport_num = rpriv->rep->vport;
+ if (acl_vport_num != fwd_vport_num) {
+ /* Only a single rx_rule per unique bond_metadata should be
+ * present; delete it if it is saved as the passive vport's
+ * rx_rule with the passive vport's root_ft as destination
+ */
+ mlx5e_rep_bond_update(priv, true);
+ err = mlx5_esw_acl_egress_vport_bond(priv->mdev->priv.eswitch,
+ fwd_vport_num,
+ acl_vport_num);
+ if (err)
+ netdev_warn(dev,
+ "configure slave vport(%d) egress fwd, err(%d)",
+ acl_vport_num, err);
+ }
+ }
+
+ /* Insert a new rx_rule for the unique bond_metadata and save it as the
+ * active vport's rx_rule, with the active vport's root_ft as the new destination
+ */
+ err = mlx5e_rep_bond_update(netdev_priv(netdev), false);
+ if (err)
+ netdev_warn(netdev, "configure active slave vport(%d) rx_rule, err(%d)",
+ fwd_vport_num, err);
+}
+
+static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct mlx5e_rep_priv *rpriv;
+ struct net_device *lag_dev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return;
+
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+ lag_dev = info->upper_dev;
+
+ netdev_dbg(netdev, "%sslave vport(%d) lag(%s)\n",
+ info->linking ? "en" : "un", rpriv->rep->vport, lag_dev->name);
+
+ if (info->linking)
+ mlx5e_rep_bond_enslave(priv->mdev->priv.eswitch, netdev, lag_dev);
+ else
+ mlx5e_rep_bond_unslave(priv->mdev->priv.eswitch, netdev, lag_dev);
+}
+
+/* A bond device over representors and netdev events are used here in a
+ * specific way to support eswitch vport bonding and to perform eswitch
+ * vport failover by modifying the egress ACLs of the lower-dev
+ * representors' vports. This also changes the traditional behavior of
+ * lower devs under a bond device. Non-representor netdevs and
+ * representors of other vendors as lower devs of a bond device are not
+ * supported.
+ */
+static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_CHANGELOWERSTATE:
+ mlx5e_rep_changelowerstate_event(netdev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ mlx5e_rep_changeupper_event(netdev, ptr);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/* If the HW supports eswitch vport bonding, register a dedicated notifier
+ * to handle the case where two or more representors are bonded
+ */
+int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv;
+ int ret = 0;
+
+ priv = netdev_priv(netdev);
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch))
+ goto out;
+
+ uplink_priv->bond = kvzalloc(sizeof(*uplink_priv->bond), GFP_KERNEL);
+ if (!uplink_priv->bond) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&uplink_priv->bond->metadata_list);
+ uplink_priv->bond->nb.notifier_call = mlx5e_rep_esw_bond_netevent;
+ ret = register_netdevice_notifier_dev_net(netdev,
+ &uplink_priv->bond->nb,
+ &uplink_priv->bond->nn);
+ if (ret) {
+ netdev_err(netdev, "register bonding netevent notifier, err(%d)\n", ret);
+ kvfree(uplink_priv->bond);
+ uplink_priv->bond = NULL;
+ }
+
+out:
+ return ret;
+}
+
+void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(priv->mdev->priv.eswitch) ||
+ !rpriv->uplink_priv.bond)
+ return;
+
+ unregister_netdevice_notifier_dev_net(rpriv->netdev,
+ &rpriv->uplink_priv.bond->nb,
+ &rpriv->uplink_priv.bond->nn);
+ kvfree(rpriv->uplink_priv.bond);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
new file mode 100644
index 000000000000..baa162432e75
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#include <linux/refcount.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/rwlock.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include "neigh.h"
+#include "tc.h"
+#include "en_rep.h"
+#include "fs_core.h"
+#include "diag/en_rep_tracepoint.h"
+
+static unsigned long mlx5e_rep_ipv6_interval(void)
+{
+ if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
+ return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
+
+ return ~0UL;
+}
+
+static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
+{
+ unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+ unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
+}
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+
+ mlx5_fc_queue_stats_work(priv->mdev,
+ &neigh_update->neigh_stats_work,
+ neigh_update->min_interval);
+}
+
+static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ return refcount_inc_not_zero(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
+
+void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt)) {
+ mlx5e_rep_neigh_entry_remove(nhe);
+ kfree_rcu(nhe, rcu);
+ }
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh_hash_entry *next = NULL;
+
+ rcu_read_lock();
+
+ for (next = nhe ?
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &nhe->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list) :
+ list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list);
+ next;
+ next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &next->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list))
+ if (mlx5e_rep_neigh_entry_hold(next))
+ break;
+
+ rcu_read_unlock();
+
+ if (nhe)
+ mlx5e_rep_neigh_entry_release(nhe);
+
+ return next;
+}
+
+static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
+ neigh_update.neigh_stats_work.work);
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+
+ rtnl_lock();
+ if (!list_empty(&rpriv->neigh_update.neigh_list))
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
+ mlx5e_tc_update_neigh_used_value(nhe);
+
+ rtnl_unlock();
+}
+
+static void mlx5e_rep_neigh_update(struct work_struct *work)
+{
+ struct mlx5e_neigh_hash_entry *nhe =
+ container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
+ struct neighbour *n = nhe->n;
+ struct mlx5e_encap_entry *e;
+ unsigned char ha[ETH_ALEN];
+ struct mlx5e_priv *priv;
+ bool neigh_connected;
+ u8 nud_state, dead;
+
+ rtnl_lock();
+
+ /* If these parameters are changed after we release the lock,
+ * we'll receive another event letting us know about it.
+ * We use this lock to avoid inconsistency between the neigh validity
+ * and its hw address.
+ */
+ read_lock_bh(&n->lock);
+ memcpy(ha, n->ha, ETH_ALEN);
+ nud_state = n->nud_state;
+ dead = n->dead;
+ read_unlock_bh(&n->lock);
+
+ neigh_connected = (nud_state & NUD_VALID) && !dead;
+
+ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+
+ list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ if (!mlx5e_encap_take(e))
+ continue;
+
+ priv = netdev_priv(e->out_dev);
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ mlx5e_encap_put(priv, e);
+ }
+ mlx5e_rep_neigh_entry_release(nhe);
+ rtnl_unlock();
+ neigh_release(n);
+}
+
+static void mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe,
+ struct neighbour *n)
+{
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destructed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+
+ /* This assignment is valid as long as the neigh reference
+ * is taken
+ */
+ nhe->n = n;
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+}
+
+static int mlx5e_rep_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ neigh_update.netevent_nb);
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct net_device *netdev = rpriv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
+ struct mlx5e_neigh m_neigh = {};
+ struct neigh_parms *p;
+ struct neighbour *n;
+ bool found = false;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+ if (n->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ m_neigh.dev = n->dev;
+ m_neigh.family = n->ops->family;
+ memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+
+ rcu_read_lock();
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
+ rcu_read_unlock();
+ if (!nhe)
+ return NOTIFY_DONE;
+
+ mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
+ break;
+
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We check that the device is present since we don't care about
+ * changes in the default table; we only care about changes
+ * done to the per-device DELAY_PROBE_TIME parameter.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+#else
+ if (!p->dev || p->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+ neigh_list) {
+ if (p->dev == nhe->m_neigh.dev) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ return NOTIFY_DONE;
+
+ neigh_update->min_interval = min_t(unsigned long,
+ NEIGH_VAR(p, DELAY_PROBE_TIME),
+ neigh_update->min_interval);
+ mlx5_fc_update_sampling_interval(priv->mdev,
+ neigh_update->min_interval);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static const struct rhashtable_params mlx5e_neigh_ht_params = {
+ .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
+ .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
+ .key_len = sizeof(struct mlx5e_neigh),
+ .automatic_shrinking = true,
+};
+
+int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ int err;
+
+ err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&neigh_update->neigh_list);
+ mutex_init(&neigh_update->encap_lock);
+ INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
+ mlx5e_rep_neigh_stats_work);
+ mlx5e_rep_neigh_update_init_interval(rpriv);
+
+ rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ if (err)
+ goto out_err;
+ return 0;
+
+out_err:
+ rhashtable_destroy(&neigh_update->neigh_ht);
+ return err;
+}
+
+void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ unregister_netevent_notifier(&neigh_update->netevent_nb);
+
+ flush_workqueue(priv->wq); /* flush neigh update works */
+
+ cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+
+ mutex_destroy(&neigh_update->encap_lock);
+ rhashtable_destroy(&neigh_update->neigh_ht);
+}
+
+static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int err;
+
+ err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ if (err)
+ return err;
+
+ list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+
+ return err;
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
+
+ list_del_rcu(&nhe->neigh_list);
+
+ rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
+ &nhe->rhash_node,
+ mlx5e_neigh_ht_params);
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+}
+
+/* This function must only be called under the representor's encap_lock or
+ * inside rcu read lock section.
+ */
+struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_neigh_hash_entry *nhe;
+
+ nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+ return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
+}
+
+int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe)
+{
+ int err;
+
+ *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
+ if (!*nhe)
+ return -ENOMEM;
+
+ (*nhe)->priv = priv;
+ memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ spin_lock_init(&(*nhe)->encap_list_lock);
+ INIT_LIST_HEAD(&(*nhe)->encap_list);
+ refcount_set(&(*nhe)->refcnt, 1);
+
+ err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ kfree(*nhe);
+ return err;
+}
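
The netevent handler in this file recovers its per-representor context from the notifier_block embedded in struct mlx5e_rep_priv rather than from a global. A minimal standalone sketch of that container_of idiom, with hypothetical struct and function names:

#include <linux/kernel.h>
#include <linux/notifier.h>

struct example_priv {
	struct notifier_block nb;	/* embedded, registered on a chain */
	unsigned long events_seen;
};

static int example_event(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	/* Recover the enclosing object from the embedded member. */
	struct example_priv *priv = container_of(nb, struct example_priv, nb);

	priv->events_seen++;
	return NOTIFY_DONE;
}
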
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
new file mode 100644
index 000000000000..32b239189c95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_NEIGH__
+#define __MLX5_EN_REP_NEIGH__
+
+#include "en.h"
+#include "en_rep.h"
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv);
+
+struct mlx5e_neigh_hash_entry *
+mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
+ struct mlx5e_neigh *m_neigh);
+int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh_hash_entry **nhe);
+void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe);
+
+void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_EN_REP_NEIGH__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
new file mode 100644
index 000000000000..80713123de5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#include <net/dst_metadata.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include "tc.h"
+#include "neigh.h"
+#include "en_rep.h"
+#include "eswitch.h"
+#include "esw/chains.h"
+#include "en/tc_ct.h"
+#include "en/mapping.h"
+#include "en/tc_tun.h"
+#include "lib/port_tun.h"
+
+struct mlx5e_rep_indr_block_priv {
+ struct net_device *netdev;
+ struct mlx5e_rep_priv *rpriv;
+
+ struct list_head list;
+};
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
+ struct mlx5e_neigh_hash_entry *nhe;
+ int err;
+
+ err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
+ if (err)
+ return err;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!nhe) {
+ err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ if (err) {
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+ mlx5_tun_entropy_refcount_dec(tun_entropy,
+ e->reformat_type);
+ return err;
+ }
+ }
+
+ e->nhe = nhe;
+ spin_lock(&nhe->encap_list_lock);
+ list_add_rcu(&e->encap_list, &nhe->encap_list);
+ spin_unlock(&nhe->encap_list_lock);
+
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+
+ return 0;
+}
+
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
+
+ if (!e->nhe)
+ return;
+
+ spin_lock(&e->nhe->encap_list_lock);
+ list_del_rcu(&e->encap_list);
+ spin_unlock(&e->nhe->encap_list_lock);
+
+ mlx5e_rep_neigh_entry_release(e->nhe);
+ e->nhe = NULL;
+ mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
+}
+
+void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN])
+{
+ struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool encap_connected;
+ LIST_HEAD(flow_list);
+
+ ASSERT_RTNL();
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&e->res_ready);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
+
+ if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+ (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
+
+ if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ ether_addr_copy(e->h_dest, ha);
+ ether_addr_copy(eth->h_dest, ha);
+ /* Also update the encap source mac, in case it changed
+ * while the flows were deleted.
+ */
+ ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
+ }
+unlock:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ mlx5e_put_encap_flow_list(priv, &flow_list);
+}
+
+static int
+mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
+ struct flow_cls_offload *cls_flower, int flags)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_DESTROY:
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_STATS:
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ switch (ma->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlx5e_tc_configure_matchall(priv, ma);
+ case TC_CLSMATCHALL_DESTROY:
+ return mlx5e_tc_delete_matchall(priv, ma);
+ case TC_CLSMATCHALL_STATS:
+ mlx5e_tc_stats_matchall(priv, ma);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ struct mlx5e_priv *priv = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+ case TC_SETUP_CLSMATCHALL:
+ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload tmp, *f = type_data;
+ struct mlx5e_priv *priv = cb_priv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ flags = MLX5_TC_FLAG(INGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+ esw = priv->mdev->priv.eswitch;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ if (!mlx5_esw_chains_prios_supported(esw))
+ return -EOPNOTSUPP;
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ return -EOPNOTSUPP;
+ if (tmp.common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
+static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
+int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
+
+ f->unlocked_driver_cb = true;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_tc_cb_list,
+ mlx5e_rep_setup_tc_cb,
+ priv, priv, true);
+ case TC_SETUP_FT:
+ return flow_block_cb_setup_simple(type_data,
+ &mlx5e_rep_block_ft_cb_list,
+ mlx5e_rep_setup_ft_cb,
+ priv, priv, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+ int err;
+
+ mutex_init(&uplink_priv->unready_flows_lock);
+ INIT_LIST_HEAD(&uplink_priv->unready_flows);
+
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ return err;
+}
+
+void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
+{
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+}
+
+void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
+ mlx5e_tc_reoffload_flows_work);
+}
+
+void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
+}
+
+int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
+
+ return NOTIFY_OK;
+}
+
+static struct mlx5e_rep_indr_block_priv *
+mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv,
+ &rpriv->uplink_priv.tc_indr_block_priv_list,
+ list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int
+mlx5e_rep_indr_offload(struct net_device *netdev,
+ struct flow_cls_offload *flower,
+ struct mlx5e_rep_indr_block_priv *indr_priv,
+ unsigned long flags)
+{
+ struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
+ int err = 0;
+
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ err = mlx5e_configure_flower(netdev, priv, flower, flags);
+ break;
+ case FLOW_CLS_DESTROY:
+ err = mlx5e_delete_flower(netdev, priv, flower, flags);
+ break;
+ case FLOW_CLS_STATS:
+ err = mlx5e_stats_flower(netdev, priv, flower, flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
+ flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload tmp;
+ struct mlx5e_priv *mpriv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ mpriv = netdev_priv(priv->rpriv->netdev);
+ esw = mpriv->mdev->priv.eswitch;
+
+ flags = MLX5_TC_FLAG(EGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ tmp.common.chain_index)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_rep_indr_block_unbind(void *cb_priv)
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
+
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+}
+
+static LIST_HEAD(mlx5e_block_cb_list);
+
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct flow_block_offload *f,
+ flow_setup_cb_t *setup_cb)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5e_rep_indr_block_priv *indr_priv;
+ struct flow_block_cb *block_cb;
+
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->unlocked_driver_cb = true;
+ f->driver_block_list = &mlx5e_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->rpriv = rpriv;
+ list_add(&indr_priv->list,
+ &rpriv->uplink_priv.tc_indr_block_priv_list);
+
+ block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind);
+ if (IS_ERR(block_cb)) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ return PTR_ERR(block_cb);
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
+
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_tc_cb);
+ case TC_SETUP_FT:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_ft_cb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+
+ return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
+}
+
+void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
+{
+ flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
+ mlx5e_rep_indr_setup_tc_cb);
+}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct tunnel_match_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
+ int err;
+
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
+ if (err) {
+ WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
+ return false;
+ }
+
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
+ return false;
+ }
+ }
+
+ tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (!tun_dst) {
+ WARN_ON_ONCE(true);
+ return false;
+ }
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ 0, /* label */
+ key.enc_tp.src, key.enc_tp.dst,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ TUNNEL_KEY);
+
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+ /* Set tun_dev so we can dev_put() it after the datapath */
+ tc_priv->tun_dev = dev;
+
+ skb->dev = dev;
+
+ return true;
+}
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int tunnel_moffset;
+ int err;
+
+ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
+ reg_c0 = 0;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
+ if (!reg_c0)
+ return true;
+
+ priv = netdev_priv(skb->dev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ reg_c0, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ tc_skb_ext->chain = chain;
+
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
+ }
+
+ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
+ tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
+
+void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->tun_dev)
+ dev_put(tc_priv->tun_dev);
+}
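
mlx5e_rep_tc_update_skb() above packs three restore ids into two CQE registers: the chain-miss tag in reg_c0, and the CT tuple id plus the tunnel mapping id sharing reg_c1, split at a byte boundary taken from the TUNNEL_TO_REG mapping. Condensed to its core, the unpacking looks like this (a sketch of the code above; field widths are illustrative):

/* reg_c0: chain tag; reg_c1: tuple id in the low bits, tunnel id above. */
u32 reg_c0 = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
u32 tuple_id = reg_c1 & TUPLE_ID_MAX;
u32 tunnel_id = reg_c1 >> (8 * tunnel_moffset);
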
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
new file mode 100644
index 000000000000..fdf9702c2d7d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_TC_H__
+#define __MLX5_EN_REP_TC_H__
+
+#include <linux/skbuff.h>
+#include "en_tc.h"
+#include "en_rep.h"
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv);
+
+int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv);
+
+void mlx5e_rep_tc_enable(struct mlx5e_priv *priv);
+void mlx5e_rep_tc_disable(struct mlx5e_priv *priv);
+
+int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv);
+
+void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ bool neigh_connected,
+ unsigned char ha[ETH_ALEN]);
+
+int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv);
+void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+struct mlx5e_rep_priv;
+static inline int
+mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {}
+
+static inline int
+mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; }
+static inline void
+mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {}
+
+static inline void
+mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {}
+static inline void
+mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {}
+
+static inline int
+mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; }
+
+static inline int
+mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data) { return -EOPNOTSUPP; }
+
+struct mlx5e_tc_update_priv;
+static inline bool
+mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv) { return true; }
+static inline void
+mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_EN_REP_TC_H__ */
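
Both new rep headers follow the same compile-out convention: callers are written unconditionally, and when CONFIG_MLX5_CLS_ACT is off the static-inline stubs collapse to nothing at compile time. A generic sketch of the pattern, with hypothetical names:

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
int example_feature_init(struct example_priv *priv);
void example_feature_cleanup(struct example_priv *priv);
#else
/* Stubs: report success on init, no-op on cleanup, zero runtime cost. */
static inline int
example_feature_init(struct example_priv *priv) { return 0; }
static inline void
example_feature_cleanup(struct example_priv *priv) {}
#endif
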
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 4eb305af0106..afc19dca1f5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -24,6 +24,7 @@
#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
+#define MLX5_CT_STATE_NAT_BIT BIT(3)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -61,6 +62,15 @@ struct mlx5_ct_zone_rule {
bool nat;
};
+struct mlx5_tc_ct_pre {
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *flow_grp;
+ struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_modify_hdr *modify_hdr;
+};
+
struct mlx5_ct_ft {
struct rhash_head node;
u16 zone;
@@ -68,14 +78,14 @@ struct mlx5_ct_ft {
struct nf_flowtable *nf_ft;
struct mlx5_tc_ct_priv *ct_priv;
struct rhashtable ct_entries_ht;
+ struct mlx5_tc_ct_pre pre_ct;
+ struct mlx5_tc_ct_pre pre_ct_nat;
};
struct mlx5_ct_entry {
u16 zone;
struct rhash_head node;
- struct flow_rule *flow_rule;
struct mlx5_fc *counter;
- unsigned long lastuse;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_zone_rule zone_rules[2];
@@ -109,7 +119,7 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
}
static int
-mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
+mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct flow_rule *rule)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -124,10 +134,8 @@ mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
flow_rule_match_basic(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(match.mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(match.key->n_proto));
+ mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c,
+ headers_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -384,7 +392,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -428,6 +436,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_modify_hdr *mod_hdr;
struct flow_action_entry *meta;
+ u16 ct_state = 0;
int err;
meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
@@ -446,11 +455,13 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
&mod_acts);
if (err)
goto err_mapping;
+
+ ct_state |= MLX5_CT_STATE_NAT_BIT;
}
+ ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
- (MLX5_CT_STATE_ESTABLISHED_BIT |
- MLX5_CT_STATE_TRK_BIT),
+ ct_state,
meta->ct_metadata.mark,
meta->ct_metadata.labels[0],
tupleid);
@@ -520,7 +531,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->counter = entry->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
- mlx5_tc_ct_set_tuple_match(spec, flow_rule);
+ mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
entry->zone & MLX5_CT_ZONE_MASK,
MLX5_CT_ZONE_MASK);
@@ -603,7 +614,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
return -ENOMEM;
entry->zone = ft->zone;
- entry->flow_rule = flow_rule;
entry->cookie = flow->cookie;
entry->restore_cookie = meta_action->ct_metadata.cookie;
@@ -687,7 +697,7 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
return mlx5_tc_ct_block_flow_offload_stats(ft, f);
default:
break;
- };
+ }
return -EOPNOTSUPP;
}
@@ -795,6 +805,238 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
return 0;
}
+static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct,
+ bool nat)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_flow_table *fdb = pre_ct->fdb;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 ctstate;
+ u16 zone;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
+ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ pre_mod_acts.num_actions,
+ pre_mod_acts.actions);
+
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to create pre ct mod hdr");
+ goto err_mapping;
+ }
+ pre_ct->modify_hdr = mod_hdr;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act.modify_hdr = mod_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ /* add flow rule */
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ zone, MLX5_CT_ZONE_MASK);
+ ctstate = MLX5_CT_STATE_TRK_BIT;
+ if (nat)
+ ctstate |= MLX5_CT_STATE_NAT_BIT;
+ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
+
+ dest.ft = ct_priv->post_ct;
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add pre ct flow rule zone %d", zone);
+ goto err_flow_rule;
+ }
+ pre_ct->flow_rule = rule;
+
+ /* add miss rule */
+ memset(spec, 0, sizeof(*spec));
+ dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add pre ct miss rule zone %d", zone);
+ goto err_miss_rule;
+ }
+ pre_ct->miss_rule = rule;
+
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kvfree(spec);
+ return 0;
+
+err_miss_rule:
+ mlx5_del_flow_rules(pre_ct->flow_rule);
+err_flow_rule:
+ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
+err_mapping:
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kvfree(spec);
+ return err;
+}
+
+static void
+tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+
+ mlx5_del_flow_rules(pre_ct->flow_rule);
+ mlx5_del_flow_rules(pre_ct->miss_rule);
+ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
+}
+
+static int
+mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct,
+ bool nat)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
+ struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ u32 metadata_reg_c_2_mask;
+ u32 *flow_group_in;
+ void *misc;
+ int err;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ ct_dbg("Failed to get FDB flow namespace");
+ return err;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ ct_dbg("Failed to create pre ct table");
+ goto out_free;
+ }
+ pre_ct->fdb = ft;
+
+ /* create flow group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ misc = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria.misc_parameters_2);
+
+ metadata_reg_c_2_mask = MLX5_CT_ZONE_MASK;
+ metadata_reg_c_2_mask |= (MLX5_CT_STATE_TRK_BIT << 16);
+ if (nat)
+ metadata_reg_c_2_mask |= (MLX5_CT_STATE_NAT_BIT << 16);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_2,
+ metadata_reg_c_2_mask);
+
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ ct_dbg("Failed to create pre ct group");
+ goto err_flow_grp;
+ }
+ pre_ct->flow_grp = g;
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ ct_dbg("Failed to create pre ct miss group");
+ goto err_miss_grp;
+ }
+ pre_ct->miss_grp = g;
+
+ err = tc_ct_pre_ct_add_rules(ct_ft, pre_ct, nat);
+ if (err)
+ goto err_add_rules;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_add_rules:
+ mlx5_destroy_flow_group(pre_ct->miss_grp);
+err_miss_grp:
+ mlx5_destroy_flow_group(pre_ct->flow_grp);
+err_flow_grp:
+ mlx5_destroy_flow_table(ft);
+out_free:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
+ struct mlx5_tc_ct_pre *pre_ct)
+{
+ tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
+ mlx5_destroy_flow_group(pre_ct->miss_grp);
+ mlx5_destroy_flow_group(pre_ct->flow_grp);
+ mlx5_destroy_flow_table(pre_ct->fdb);
+}
+
+static int
+mlx5_tc_ct_alloc_pre_ct_tables(struct mlx5_ct_ft *ft)
+{
+ int err;
+
+ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct, false);
+ if (err)
+ return err;
+
+ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct_nat, true);
+ if (err)
+ goto err_pre_ct_nat;
+
+ return 0;
+
+err_pre_ct_nat:
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
+ return err;
+}
+
+static void
+mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
+{
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct_nat);
+ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
+}
+
static struct mlx5_ct_ft *
mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
struct nf_flowtable *nf_ft)
@@ -817,6 +1059,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
ft->ct_priv = ct_priv;
refcount_set(&ft->refcount, 1);
+ err = mlx5_tc_ct_alloc_pre_ct_tables(ft);
+ if (err)
+ goto err_alloc_pre_ct;
+
err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
if (err)
goto err_init;
@@ -838,6 +1084,8 @@ err_add_cb:
err_insert:
rhashtable_destroy(&ft->ct_entries_ht);
err_init:
+ mlx5_tc_ct_free_pre_ct_tables(ft);
+err_alloc_pre_ct:
kfree(ft);
return ERR_PTR(err);
}
@@ -863,21 +1111,40 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
+ mlx5_tc_ct_free_pre_ct_tables(ft);
kfree(ft);
}
/* We translate the tc filter with CT action to the following HW model:
*
- * +-------------------+ +--------------------+ +--------------+
- * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +----->
- * + original match + | + tuple + zone match + | + fte_id match + |
- * +-------------------+ | +--------------------+ | +--------------+ |
- * v v v
- * set chain miss mapping set mark original
- * set fte_id set label filter
- * set zone set established actions
- * set tunnel_id do nat (if needed)
- * do decap
+ * +---------------------+
+ * + fdb prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * | set chain miss mapping
+ * | set fte_id
+ * | set tunnel_id
+ * | do decap
+ * v
+ * +---------------------+
+ * + pre_ct/pre_ct_nat + if matches +---------------------+
+ * + zone+nat match +---------------->+ post_ct (see below) +
+ * +---------------------+ set zone +---------------------+
+ * | set zone
+ * v
+ * +--------------------+
+ * + CT (nat or no nat) +
+ * + tuple + zone match +
+ * +--------------------+
+ * | set mark
+ * | set label
+ * | set established
+ * | do nat (if needed)
+ * v
+ * +--------------+
+ * + post_ct + original filter actions
+ * + fte_id match +------------------------>
+ * +--------------+
*/
static int
__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
@@ -892,7 +1159,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *post_ct_spec = NULL;
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_esw_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
int chain_mapping = 0, err;
@@ -955,14 +1222,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG,
- attr->ct_attr.zone &
- MLX5_CT_ZONE_MASK);
- if (err) {
- ct_dbg("Failed to set zone register mapping");
- goto err_mapping;
- }
-
err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
FTEID_TO_REG, fte_id);
if (err) {
@@ -1022,7 +1281,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
orig_spec,
pre_ct_attr);
@@ -1132,7 +1391,7 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
int err;
if (!ct_priv)
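
The pre_ct flow group created above matches two facts with a single metadata register: the zone in the low half of reg_c_2 and the ct_state flag bits shifted into the upper half. A sketch of composing that mask, assuming a 16-bit zone as MLX5_CT_ZONE_MASK suggests:

/* One register, two fields: zone in bits 0..15, ct_state bits above. */
u32 reg_c_2_mask = MLX5_CT_ZONE_MASK;			/* match the zone */
reg_c_2_mask |= MLX5_CT_STATE_TRK_BIT << 16;		/* +tracked flag  */
if (nat)
	reg_c_2_mask |= MLX5_CT_STATE_NAT_BIT << 16;	/* +nat-done flag */
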
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b45c3f46570b..7cce85faa16f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -4,8 +4,11 @@
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
+#include <net/bareudp.h>
#include "en/tc_tun.h"
#include "en_tc.h"
+#include "rep/tc.h"
+#include "rep/neigh.h"
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
{
@@ -16,6 +19,8 @@ struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
else if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return &gre_tunnel;
+ else if (netif_is_bareudp(tunnel_dev))
+ return &mplsoudp_tunnel;
else
return NULL;
}
@@ -96,9 +101,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
}
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- ret = PTR_ERR_OR_ZERO(rt);
- if (ret)
- return ret;
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
ip_rt_put(rt);
@@ -508,6 +512,13 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_basic key_basic = {};
+ struct flow_dissector_key_basic mask_basic = {
+ .n_proto = htons(0xFFFF),
+ };
+ struct flow_match_basic match_basic = {
+ .key = &key_basic, .mask = &mask_basic,
+ };
struct flow_match_control match;
u16 addr_type;
@@ -533,10 +544,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
ntohl(match.key->dst));
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IP);
+ key_basic.n_proto = htons(ETH_P_IP);
+ mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
+ headers_c, headers_v);
} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
@@ -559,10 +569,9 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
&match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
ipv6));
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IPV6);
+ key_basic.n_proto = htons(ETH_P_IPV6);
+ mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true,
+ headers_c, headers_v);
}
}
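
The tc_tun.c hunk above replaces two open-coded MLX5_SET() pairs with a small trick: it synthesizes a flow_match_basic on the stack so the shared mlx5e_tc_set_ethertype() helper can also serve a hard-coded ethertype. Condensed, the idea is (a restatement of the code above, not a new API):

struct flow_dissector_key_basic key_basic = {
	.n_proto = htons(ETH_P_IP),	/* the value to force */
};
struct flow_dissector_key_basic mask_basic = {
	.n_proto = htons(0xFFFF),	/* exact match */
};
struct flow_match_basic match_basic = {
	.key = &key_basic, .mask = &mask_basic,
};

/* The helper fills headers_c/headers_v exactly as if the ethertype had
 * arrived from the flow dissector.
 */
mlx5e_tc_set_ethertype(priv->mdev, &match_basic, true, headers_c, headers_v);
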
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 1630f0ec3ad7..704359df6095 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -16,6 +16,7 @@ enum {
MLX5E_TC_TUNNEL_TYPE_VXLAN,
MLX5E_TC_TUNNEL_TYPE_GENEVE,
MLX5E_TC_TUNNEL_TYPE_GRETAP,
+ MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
};
struct mlx5e_tc_tunnel {
@@ -46,6 +47,7 @@ struct mlx5e_tc_tunnel {
extern struct mlx5e_tc_tunnel vxlan_tunnel;
extern struct mlx5e_tc_tunnel geneve_tunnel;
extern struct mlx5e_tc_tunnel gre_tunnel;
+extern struct mlx5e_tc_tunnel mplsoudp_tunnel;
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
new file mode 100644
index 000000000000..1f9526244222
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/bareudp.h>
+#include <net/mpls.h>
+#include "en/tc_tun.h"
+
+static bool can_offload(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l3_tunnel_to_l2);
+}
+
+static int calc_hlen(struct mlx5e_encap_entry *e)
+{
+ return sizeof(struct udphdr) + MPLS_HLEN;
+}
+
+static int init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel = &mplsoudp_tunnel;
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ return 0;
+}
+
+static int generate_ip_tun_hdr(char buf[],
+ __u8 *ip_proto,
+ struct mlx5e_encap_entry *r)
+{
+ const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct mpls_shim_hdr *mpls;
+ u32 tun_id;
+
+ tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
+ mpls = (struct mpls_shim_hdr *)(udp + 1);
+ *ip_proto = IPPROTO_UDP;
+
+ udp->dest = tun_key->tp_dst;
+ *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+
+ return 0;
+}
+
+static int parse_udp_ports(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ return mlx5e_tc_tun_parse_udp_ports(priv, spec, f, headers_c, headers_v);
+}
+
+static int parse_tunnel(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_enc_keyid enc_keyid;
+ struct flow_match_mpls match;
+ void *misc2_c;
+ void *misc2_v;
+
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return 0;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid)
+ return 0;
+
+ if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+ MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ return -EOPNOTSUPP;
+
+ flow_rule_match_mpls(rule, &match);
+
+ /* Only support matching the first LSE */
+ if (match.mask->used_lses != 1)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_label,
+ match.mask->ls[0].mpls_label);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_label,
+ match.key->ls[0].mpls_label);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_exp,
+ match.mask->ls[0].mpls_tc);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_exp, match.key->ls[0].mpls_tc);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_s_bos,
+ match.mask->ls[0].mpls_bos);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_s_bos,
+ match.key->ls[0].mpls_bos);
+
+ MLX5_SET(fte_match_set_misc2, misc2_c,
+ outer_first_mpls_over_udp.mpls_ttl,
+ match.mask->ls[0].mpls_ttl);
+ MLX5_SET(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp.mpls_ttl,
+ match.key->ls[0].mpls_ttl);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ return 0;
+}
+
+struct mlx5e_tc_tunnel mplsoudp_tunnel = {
+ .tunnel_type = MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
+ .match_level = MLX5_MATCH_L4,
+ .can_offload = can_offload,
+ .calc_hlen = calc_hlen,
+ .init_encap_attr = init_encap_attr,
+ .generate_ip_tun_hdr = generate_ip_tun_hdr,
+ .parse_udp_ports = parse_udp_ports,
+ .parse_tunnel = parse_tunnel,
+};
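
For orientation, the encapsulation emitted by generate_ip_tun_hdr() above is 12 bytes past the outer IP header: an 8-byte UDP header followed by exactly one 4-byte MPLS label stack entry, which is what calc_hlen() reports. A sketch of the resulting layout (assuming MPLS_HLEN is the size of one LSE):

#include <linux/udp.h>
#include <net/mpls.h>

/* L3 tunnel header as built above: UDP, then a single MPLS LSE. */
struct mplsoudp_hdr {
	struct udphdr udp;	  /* 8 bytes; dest port selects the bareudp dev */
	struct mpls_shim_hdr lse; /* 4 bytes: label(20) tc(3) bos(1) ttl(8) */
} __packed;
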
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index f07b1399744e..bfd3e1161bc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,46 +6,32 @@
#include "en.h"
-#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
- MLX5E_SQ_NOPS_ROOM)
-
-#ifndef CONFIG_MLX5_EN_TLS
-#define MLX5E_SQ_TLS_ROOM (0)
-#else
-/* TLS offload requires additional stop_room for:
- * - a resync SKB.
- * kTLS offload requires fixed additional stop_room for:
- * - a static params WQE, and a progress params WQE.
- * The additional MTU-depending room for the resync DUMP WQEs
- * will be calculated and added in runtime.
- */
-#define MLX5E_SQ_TLS_ROOM \
- (MLX5_SEND_WQE_MAX_WQEBBS + \
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
-#endif
-
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+enum mlx5e_icosq_wqe_type {
+ MLX5E_ICOSQ_WQE_NOP,
+ MLX5E_ICOSQ_WQE_UMR_RX,
+};
+
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
-static inline void *
-mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
+static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
void *wqe;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, size);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memset(wqe, 0, wqe_size);
return wqe;
}
+#define MLX5E_TX_FETCH_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
+
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
@@ -81,6 +67,84 @@ mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
return wqe;
}
+struct mlx5e_tx_wqe_info {
+ struct sk_buff *skb;
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+#ifdef CONFIG_MLX5_EN_TLS
+ struct page *resync_dump_frag_page;
+#endif
+};
+
+static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_tx_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nop += contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
+struct mlx5e_icosq_wqe_info {
+ u8 wqe_type;
+ u8 num_wqebbs;
+
+ /* Auxiliary data for different wqe types. */
+ union {
+ struct {
+ struct mlx5e_rq *rq;
+ } umr;
+ };
+};
+
+static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_icosq_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
+
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
u16 pi, u16 nnops)
@@ -102,7 +166,7 @@ static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
- ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -189,6 +253,22 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
}
}
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+ struct mlx5_err_cqe *err_cqe)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci;
+
+ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+ netdev_err(cq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ cq->mcq.cqn, ci, sqn,
+ get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
+ mlx5_dump_err_cqe(cq->mdev, err_cqe);
+}
+
/* SW parser related functions */
struct mlx5e_swp_spec {
@@ -232,4 +312,25 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
+static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
+{
+ BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+
+ /* A WQE must not cross the page boundary, hence two conditions:
+ * 1. Its size must not exceed the page size.
+ * 2. If the WQE size is X, and the space remaining in a page is less
+ * than X, this space needs to be padded with NOPs. So, one WQE of
+ * size X may require up to X-1 WQEBBs of padding, which brings the
+ * stop room to X-1 + X.
+ * WQE size is also limited by the hardware limit.
+ */
+
+ if (__builtin_constant_p(wqe_size))
+ BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
+ else
+ WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
+
+ return wqe_size * 2 - 1;
+}
+
#endif
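
As a quick check of the arithmetic in mlx5e_stop_room_for_wqe(): a WQE of X WQEBBs may first burn up to X-1 WQEBBs of NOP padding at a page edge, so the reservation is (X-1) + X = 2X-1. For example, assuming MLX5_SEND_WQE_MAX_WQEBBS is 16:

/* 15 padding NOPs + the 16-WQEBB WQE itself = 31 WQEBBs reserved. */
u16 room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); /* 16*2 - 1 */
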
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f049e0ac308a..c9d308e91965 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -31,7 +31,7 @@
*/
#include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
@@ -64,14 +64,14 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
return false;
xdptxd.data = xdpf->data;
xdptxd.len = xdpf->len;
- if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
/* The xdp_buff was in the UMEM and was copied into a newly
* allocated page. The UMEM page was returned via the ZCA, and
* this new page has to be mapped at this point and has to be
@@ -97,10 +97,10 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
} else {
- /* Driver assumes that convert_to_xdp_frame returns an xdp_frame
- * that points to the same memory region as the original
- * xdp_buff. It allows to map the memory only once and to use
- * the DMA_BIDIRECTIONAL mode.
+ /* Driver assumes that xdp_convert_buff_to_frame returns
+ * an xdp_frame that points to the same memory region as
+ * the original xdp_buff. This allows mapping the memory only
+ * once and using the DMA_BIDIRECTIONAL mode.
*/
xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
@@ -119,49 +119,33 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len, bool xsk)
+ u32 *len, struct xdp_buff *xdp)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
- struct xdp_umem *umem = rq->umem;
- struct xdp_buff xdp;
u32 act;
int err;
if (!prog)
return false;
- xdp.data = va + *rx_headroom;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.data_hard_start = va;
- if (xsk)
- xdp.handle = di->xsk.handle;
- xdp.rxq = &rq->xdp_rxq;
-
- act = bpf_prog_run_xdp(prog, &xdp);
- if (xsk) {
- u64 off = xdp.data - xdp.data_hard_start;
-
- xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
- }
+ act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- *rx_headroom = xdp.data - xdp.data_hard_start;
- *len = xdp.data_end - xdp.data;
+ *len = xdp->data_end - xdp->data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
case XDP_REDIRECT:
/* When XDP enabled then page-refcnt==1 here */
- err = xdp_do_redirect(rq->netdev, &xdp, prog);
+ err = xdp_do_redirect(rq->netdev, xdp, prog);
if (unlikely(err))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
- if (!xsk)
+ if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
@@ -178,20 +162,43 @@ xdp_abort:
}
}
-static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
- struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs < size)) {
+ struct mlx5e_xdp_wqe_info *wi, *edge_wi;
+
+ wi = &sq->db.wqe_info[pi];
+ edge_wi = wi + contig_wqebbs;
+
+ /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
+ for (; wi < edge_wi; wi++) {
+ *wi = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 0,
+ };
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+ sq->stats->nops += contig_wqebbs;
- if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
- mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ }
+
+ return pi;
+}
- session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+ u16 pi;
+
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
+ session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
prefetchw(session->wqe->data);
session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
@@ -233,8 +240,10 @@ enum {
static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
+ const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5E_XDPSQ_STOP_ROOM))) {
+ stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -408,22 +417,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
i = 0;
do {
- u16 wqe_counter;
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 wqe_counter, ci;
bool last_wqe;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
- netdev_WARN_ONCE(sq->channel->netdev,
- "Bad OP in XDPSQ CQE: 0x%x\n",
- get_cqe_opcode(cqe));
-
do {
- struct mlx5e_xdp_wqe_info *wi;
- u16 ci;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
@@ -432,6 +434,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
} while (!last_wqe);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(sq->channel->netdev,
+ "Bad OP in XDPSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+ }
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
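
mlx5e_xdp_handle() above follows the standard XDP verdict contract: run the attached program once per frame and branch on its return code. Stripped of the mlx5 plumbing, the shape is this generic sketch (not driver-specific API):

u32 act = bpf_prog_run_xdp(prog, xdp);

switch (act) {
case XDP_PASS:		/* hand the frame to the regular rx path */
	break;
case XDP_TX:		/* transmit it back out the receiving port */
	break;
case XDP_REDIRECT:	/* forward via xdp_do_redirect() */
	break;
default:		/* XDP_ABORTED or unknown verdict: drop */
	break;
}
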
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index d7587f40ecae..ca48c293151b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -40,8 +40,6 @@
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
-
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
@@ -63,7 +61,7 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- void *va, u16 *rx_headroom, u32 *len, bool xsk);
+ u32 *len, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -137,22 +135,10 @@ mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
}
-static inline void
-mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->num_wqebbs = 1;
- wi->num_pkts = 0;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-
- sq->stats->nops += nnops;
-}
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_pkts;
+};
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
@@ -186,19 +172,6 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->ds_count++;
}
-static inline struct mlx5e_tx_wqe *
-mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
-
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
- memset(wqe, 0, sizeof(*wqe));
-
- return wqe;
-}
-
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
struct mlx5e_xdp_info *xi)
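
Note: mlx5e_xdpsq_fetch_wqe() is removed in favor of the generic MLX5E_TX_FETCH_WQE() macro (see also the MLX5E_TLS_FETCH_*_WQE macros added in ktls.h further down). Judging by the removed body, the shared mlx5e_fetch_wqe() helper presumably reduces to something like this sketch (an assumption; the helper's definition is not part of this diff):

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
	void *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, wqe_size); /* same zeroing the removed per-SQ helper did */
	return wqe;
}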
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 62fc8a128a8d..a33a1f762c70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -3,71 +3,10 @@
#include "rx.h"
#include "en/xdp.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* RX data path */
-bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count)
-{
- /* Check in advance that we have enough frames, instead of allocating
- * one-by-one, failing and moving frames to the Reuse Ring.
- */
- return xsk_umem_has_addrs_rq(rq->umem, count);
-}
-
-int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- struct xdp_umem *umem = rq->umem;
- u64 handle;
-
- if (!xsk_umem_peek_addr_rq(umem, &handle))
- return -ENOMEM;
-
- dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,
- rq->buff.umem_headroom);
- dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
-
- /* No need to add headroom to the DMA address. In striding RQ case, we
- * just provide pages for UMR, and headroom is counted at the setup
- * stage when creating a WQE. In non-striding RQ case, headroom is
- * accounted in mlx5e_alloc_rx_wqe.
- */
- dma_info->addr = xdp_umem_get_dma(umem, handle);
-
- xsk_umem_release_addr_rq(umem);
-
- dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- return 0;
-}
-
-static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
-{
- xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
-}
-
-/* XSKRQ uses pages from UMEM, they must not be released. They are returned to
- * the userspace if possible, and if not, this function is called to reuse them
- * in the driver.
- */
-void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle);
-}
-
-/* Return a frame back to the hardware to fill in again. It is used by XDP when
- * the XDP program returns XDP_TX or XDP_REDIRECT not to an XSKMAP.
- */
-void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
-{
- struct mlx5e_rq *rq = container_of(zca, struct mlx5e_rq, zca);
-
- mlx5e_xsk_recycle_frame(rq, handle);
-}
-
static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
u32 cqe_bcnt)
{
@@ -90,11 +29,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
- u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
+ struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
u32 cqe_bcnt32 = cqe_bcnt;
- void *va, *data;
- u32 frag_size;
bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -103,22 +39,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
return NULL;
}
- /* head_offset is not used in this function, because di->xsk.data and
- * di->addr point directly to the necessary place. Furthermore, in the
- * current implementation, UMR pages are mapped to XSK frames, so
+ /* head_offset is not used in this function, because xdp->data and the
+ * DMA address point directly to the necessary place. Furthermore, in
+ * the current implementation, UMR pages are mapped to XSK frames, so
* head_offset should always be 0.
*/
WARN_ON_ONCE(head_offset);
- va = di->xsk.data;
- data = va + rx_headroom;
- frag_size = rq->buff.headroom + cqe_bcnt32;
-
- dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
- prefetch(data);
+ xdp->data_end = xdp->data + cqe_bcnt32;
+ xdp_set_data_meta_invalid(xdp);
+ xsk_buff_dma_sync_for_cpu(xdp);
+ prefetch(xdp->data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, true);
+ consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
rcu_read_unlock();
/* Possible flows:
@@ -145,7 +79,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt32);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -153,25 +87,20 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = wi->di;
- u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
- void *va, *data;
+ struct xdp_buff *xdp = wi->di->xsk;
bool consumed;
- u32 frag_size;
- /* wi->offset is not used in this function, because di->xsk.data and
- * di->addr point directly to the necessary place. Furthermore, in the
- * current implementation, one page = one packet = one frame, so
+ /* wi->offset is not used in this function, because xdp->data and the
+ * DMA address point directly to the necessary place. Furthermore, the
+ * XSK allocator allocates frames per packet, instead of pages, so
* wi->offset should always be 0.
*/
WARN_ON_ONCE(wi->offset);
- va = di->xsk.data;
- data = va + rx_headroom;
- frag_size = rq->buff.headroom + cqe_bcnt;
-
- dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
- prefetch(data);
+ xdp->data_end = xdp->data + cqe_bcnt;
+ xdp_set_data_meta_invalid(xdp);
+ xsk_buff_dma_sync_for_cpu(xdp);
+ prefetch(xdp->data);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
@@ -179,7 +108,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
}
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, true);
+ consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
rcu_read_unlock();
if (likely(consumed))
@@ -189,5 +118,5 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
* will be handled by mlx5e_put_rx_frag.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
}
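
Note: this file is the core of the conversion from the Reuse Ring / zero-copy-allocator model to the XSK buffer pool. The per-frame driver contract, as used in the hunks above, is roughly the following sketch (helper names are from the patch; the wrapper functions are illustrative):

/* Refill: one call replaces peek/adjust/release and the manual DMA sync. */
static int example_xsk_refill(struct mlx5e_rq *rq, struct mlx5e_dma_info *di)
{
	di->xsk = xsk_buff_alloc(rq->umem);
	if (!di->xsk)
		return -ENOMEM; /* fill ring is empty */
	di->addr = xsk_buff_xdp_get_frame_dma(di->xsk);
	return 0;
}

/* Completion: trim to the received length and sync the frame for the CPU. */
static void example_xsk_complete(struct xdp_buff *xdp, u32 cqe_bcnt)
{
	xdp->data_end = xdp->data + cqe_bcnt;
	xdp_set_data_meta_invalid(xdp);
	xsk_buff_dma_sync_for_cpu(xdp);
}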
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index cab0e93497ae..d147b2f13b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,16 +5,10 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* RX data path */
-bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count);
-int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info);
-void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info);
-void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
u16 cqe_bcnt,
@@ -25,6 +19,23 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
+static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ dma_info->xsk = xsk_buff_alloc(rq->umem);
+ if (!dma_info->xsk)
+ return -ENOMEM;
+
+ /* Store the DMA address without headroom. In striding RQ case, we just
+ * provide pages for UMR, and headroom is counted at the setup stage
+ * when creating a WQE. In non-striding RQ case, headroom is accounted
+ * in mlx5e_alloc_rx_wqe.
+ */
+ dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+
+ return 0;
+}
+
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
if (!xsk_umem_uses_need_wakeup(rq->umem))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 3bcdb5b2fc20..83dce9cdb8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -5,7 +5,7 @@
#include "umem.h"
#include "en/xdp.h"
#include "en/params.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
@@ -92,12 +92,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
- xdptxd.data = xdp_umem_get_data(umem, desc.addr);
+ xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
+ xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
xdptxd.len = desc.len;
- dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
- xdptxd.len, DMA_BIDIRECTIONAL);
+ xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
if (sq->mpwqe.wqe)
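
Note: on the TX side the conversion is mechanical. A sketch of the descriptor translation step, assuming the surrounding loop and struct mlx5e_xmit_data match mainline (neither is shown in this hunk):

static void example_fill_xmit_data(struct xdp_umem *umem, struct xdp_desc *desc,
				   struct mlx5e_xmit_data *xdptxd)
{
	xdptxd->dma_addr = xsk_buff_raw_get_dma(umem, desc->addr);
	xdptxd->data = xsk_buff_raw_get_data(umem, desc->addr);
	xdptxd->len = desc->len;

	/* Sync through the pool helper instead of dma_sync_single_for_device(). */
	xsk_buff_raw_dma_sync_for_device(umem, xdptxd->dma_addr, xdptxd->len);
}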
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 79b487d89757..39fa0a705856 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,7 +5,7 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
/* TX data path */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
index 4baaa5788320..7b17fcd0a56d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "umem.h"
#include "setup.h"
#include "en/params.h"
@@ -10,40 +10,14 @@ static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
struct xdp_umem *umem)
{
struct device *dev = priv->mdev->device;
- u32 i;
- for (i = 0; i < umem->npgs; i++) {
- dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- if (unlikely(dma_mapping_error(dev, dma)))
- goto err_unmap;
- umem->pages[i].dma = dma;
- }
-
- return 0;
-
-err_unmap:
- while (i--) {
- dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- umem->pages[i].dma = 0;
- }
-
- return -ENOMEM;
+ return xsk_buff_dma_map(umem, dev, 0);
}
static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
struct xdp_umem *umem)
{
- struct device *dev = priv->mdev->device;
- u32 i;
-
- for (i = 0; i < umem->npgs; i++) {
- dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- umem->pages[i].dma = 0;
- }
+ return xsk_buff_dma_unmap(umem, 0);
}
static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
@@ -90,13 +64,14 @@ static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
{
- return umem->headroom <= 0xffff && umem->chunk_size_nohr <= 0xffff;
+ return xsk_umem_get_headroom(umem) <= 0xffff &&
+ xsk_umem_get_chunk_size(umem) <= 0xffff;
}
void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
{
- xsk->headroom = umem->headroom;
- xsk->chunk_size = umem->chunk_size_nohr + umem->headroom;
+ xsk->headroom = xsk_umem_get_headroom(umem);
+ xsk->chunk_size = xsk_umem_get_chunk_size(umem);
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
@@ -241,18 +216,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
mlx5e_xsk_disable_umem(priv, ix);
}
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
-{
- struct xdp_umem_fq_reuse *reuseq;
-
- reuseq = xsk_reuseq_prepare(nentries);
- if (unlikely(!reuseq))
- return -ENOMEM;
- xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
- return 0;
-}
-
u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
{
u16 res = xsk->refcnt ? params->num_channels : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 3022463f2284..fac145dcf2ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include "en/txrx.h"
#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
return mlx5_tx_swp_supported(mdev);
@@ -100,33 +102,49 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
udp_hdr(skb)->len = htons(payload_len);
}
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_tx_tls_state tls;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *state)
{
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
+
#ifdef CONFIG_MLX5_EN_TLS
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
- if (unlikely(!skb))
- return NULL;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+ return false;
}
#endif
+ return true;
+}
+
+static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
- if (unlikely(!skb))
- return NULL;
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ return false;
}
#endif
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
- mlx5e_udp_gso_handle_tx_skb(skb);
-
- return skb;
+ return true;
}
#endif /* __MLX5E_EN_ACCEL_H__ */
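
Note: the single mlx5e_accel_handle_tx() is split into a begin/finish pair so that offloads that may post their own WQEs (TLS) run before the data WQE is fetched, while offloads that only patch WQE fields (IPsec metadata, the TLS tisn) run after. A sketch of the intended call order in the xmit path (the caller itself is not in this diff):

static netdev_tx_t example_xmit(struct net_device *dev, struct mlx5e_txqsq *sq,
				struct sk_buff *skb)
{
	struct mlx5e_accel_tx_state state = {};
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	/* Phase 1: may modify or drop the SKB, may post its own WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &state)))
		return NETDEV_TX_OK;

	/* Only now is the data WQE fetched, so WQEs posted in phase 1
	 * cannot invalidate pi.
	 */
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* Phase 2: fill offload fields (e.g. TLS tisn) into the WQE. */
	if (unlikely(!mlx5e_accel_tx_finish(netdev_priv(dev), sq, skb, wqe, &state)))
		return NETDEV_TX_OK;

	/* ... build the rest of the WQE and ring the doorbell ... */
	return NETDEV_TX_OK;
}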
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 29626c6c9c25..92eb3bad4acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -75,18 +75,23 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
+static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
+ unsigned int handle)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
- int ret;
- ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
+ if (_sa_entry->handle == handle) {
+ rcu_read_unlock();
+ return -EEXIST;
+ }
+ rcu_read_unlock();
spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
- sa_entry->handle = ret;
+ sa_entry->handle = handle;
hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
@@ -103,15 +108,6 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}
-static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-
- /* xfrm already doing sync rcu between del and free callbacks */
-
- ida_simple_remove(&ipsec->halloc, sa_entry->handle);
-}
-
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_replay_state_esn *replay_esn;
@@ -199,6 +195,14 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
+
+ /* spi */
+ attrs->spi = x->id.spi;
+
+ /* source and destination IPs */

+ memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
+ memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
+ attrs->is_ipv6 = (x->props.family != AF_INET);
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -284,8 +288,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
struct net_device *netdev = x->xso.dev;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
- __be32 saddr[4] = {0}, daddr[4] = {0}, spi;
- bool is_ipv6 = false;
+ unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
@@ -303,20 +306,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
sa_entry->x = x;
sa_entry->ipsec = priv->ipsec;
- /* Add the SA to handle processed incoming packets before the add SA
- * completion was received
- */
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry);
- if (err) {
- netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
- goto err_entry;
- }
- } else {
- sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
- mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
- }
-
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -327,41 +316,38 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
- goto err_sadb_rx;
+ goto err_sa_entry;
}
/* create hw context */
- if (x->props.family == AF_INET) {
- saddr[3] = x->props.saddr.a4;
- daddr[3] = x->id.daddr.a4;
- } else {
- memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
- memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
- is_ipv6 = true;
- }
- spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
- saddr, daddr, spi,
- is_ipv6);
+ &sa_handle);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
+ if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
+ if (err)
+ goto err_hw_ctx;
+ } else {
+ sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
+ mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
+ }
+
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
+err_hw_ctx:
+ mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
-err_sadb_rx:
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
- mlx5e_ipsec_sadb_rx_del(sa_entry);
- mlx5e_ipsec_sadb_rx_free(sa_entry);
- }
-err_entry:
+err_sa_entry:
kfree(sa_entry);
+
out:
return err;
}
@@ -390,9 +376,6 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
- mlx5e_ipsec_sadb_rx_free(sa_entry);
-
kfree(sa_entry);
}
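
Note: the RX SADB handle is no longer IDA-allocated by the driver; it is the value returned by mlx5_accel_esp_create_hw_context(), so the handle the device reports in RX metadata can key the hash directly. A sketch of the matching lookup side (cf. mlx5e_ipsec_sadb_rx_lookup(), declared in ipsec.h; the body here is assumed):

struct xfrm_state *example_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
					  unsigned int handle)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
		if (sa_entry->handle == handle) {
			ret = sa_entry->x;
			xfrm_state_hold(ret); /* caller releases the reference */
			break;
		}
	rcu_read_unlock();

	return ret;
}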
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 93bf10e6508c..c85151a1e008 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -109,11 +109,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv);
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data);
-
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
@@ -136,26 +131,6 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
-static inline int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv,
- uint8_t *data)
-{
- return 0;
-}
-
-static inline void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
-{
-}
-
-static inline int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
-
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 0dd17514caae..824b87ac8f9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,11 +233,10 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo = xfrm_offload(skb);
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -245,7 +244,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct sec_path *sp;
if (!xo)
- return skb;
+ return true;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -276,16 +275,16 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
- mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
mlx5e_ipsec_set_metadata(skb, mdata, xo);
- return skb;
+ return true;
drop:
kfree_skb(skb);
- return NULL;
+ return false;
}
static inline struct xfrm_state *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index db84500b024f..ba02643586a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,9 +52,9 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_tx_wqe *wqe,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
+ struct mlx5_wqe_eth_seg *eseg,
+ struct sk_buff *skb);
#endif /* CONFIG_MLX5_EN_IPSEC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 6fea59223dc4..6c5c54bcd9be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -38,6 +38,7 @@
#include "accel/ipsec.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
+#include "fpga/ipsec.h"
static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) },
@@ -73,61 +74,74 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
-#define NUM_IPSEC_COUNTERS (NUM_IPSEC_HW_COUNTERS + NUM_IPSEC_SW_COUNTERS)
-
-int mlx5e_ipsec_get_count(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
- if (!priv->ipsec)
- return 0;
-
- return NUM_IPSEC_COUNTERS;
+ return NUM_IPSEC_SW_COUNTERS;
}
-int mlx5e_ipsec_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- unsigned int i, idx = 0;
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
- if (!priv->ipsec)
- return 0;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
+{
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_sw_stats_desc[i].format);
+ return idx;
+}
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
+{
+ int i;
- return NUM_IPSEC_COUNTERS;
+ if (priv->ipsec)
+ for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
}
-void mlx5e_ipsec_update_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
- int ret;
+ return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+}
- if (!priv->ipsec)
- return;
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
+{
+ int ret = 0;
- ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
- NUM_IPSEC_HW_COUNTERS);
+ if (priv->ipsec)
+ ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
+ NUM_IPSEC_HW_COUNTERS);
if (ret)
memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats));
}
-int mlx5e_ipsec_get_stats(struct mlx5e_priv *priv, u64 *data)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
- int i, idx = 0;
-
- if (!priv->ipsec)
- return 0;
+ unsigned int i;
- for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
- mlx5e_ipsec_hw_stats_desc, i);
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
- for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
+ return idx;
+}
- return NUM_IPSEC_COUNTERS;
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+ int i;
+
+ if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev))
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats,
+ mlx5e_ipsec_hw_stats_desc,
+ i);
+ return idx;
}
+
+MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 7d1985fa0d4f..452fcf59c36b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -4,6 +4,19 @@
#include "en.h"
#include "en_accel/ktls.h"
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ u16 num_dumps, stop_room = 0;
+
+ num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
+
+ stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_STATIC_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_PROGRESS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+
+ return stop_room;
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
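
Note: mlx5e_stop_room_for_wqe() itself is outside this diff. From its use here it presumably converts one WQE of N WQEBBs into the SQ space that must remain free, including worst-case NOP padding before a fragment edge; a plausible sketch (an assumption, not the patch's definition):

static inline u16 example_stop_room_for_wqe(u16 wqe_size)
{
	/* A WQE must not cross a page boundary, so a WQE of size X may be
	 * preceded by up to X - 1 NOP WQEBBs of padding: reserve 2X - 1.
	 */
	return wqe_size * 2 - 1;
}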
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 63116be6b1d6..c6180892cfcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -9,6 +9,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include <net/tls.h>
#include "accel/tls.h"
+#include "en_accel/tls_rxtx.h"
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
@@ -27,6 +28,14 @@ struct mlx5e_dump_wqe {
struct mlx5_wqe_data_seg data;
};
+#define MLX5E_TLS_FETCH_UMR_WQE(sq, pi) \
+ ((struct mlx5e_umr_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_STATIC_UMR_WQE_SZ))
+#define MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi) \
+ ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_PROGRESS_WQE_SZ))
+#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
+ ((struct mlx5e_dump_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, \
+ sizeof(struct mlx5e_dump_wqe)))
+
#define MLX5E_KTLS_DUMP_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
@@ -87,22 +96,22 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+
static inline u8
-mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
- unsigned int sync_len)
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+ unsigned int sync_len)
{
/* Given the MTU and sync_len, calculates an upper bound for the
- * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+ * number of DUMP WQEs needed for the TX resync of a record.
*/
- return MLX5E_KTLS_DUMP_WQEBBS *
- (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+ return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
}
#else
@@ -114,7 +123,6 @@ static inline void
mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc) {}
-
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 52a56622034a..3cd78d9503c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -108,10 +108,11 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = num_wqebbs;
- wi->num_bytes = num_bytes;
- wi->resync_dump_frag_page = page;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ .resync_dump_frag_page = page,
+ };
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -134,13 +135,14 @@ post_static_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
struct mlx5e_umr_wqe *umr_wqe;
- u16 pi;
- umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -148,13 +150,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
bool fence)
{
+ u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
- sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
+ tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
+ sq->pc += num_wqebbs;
}
static void
@@ -163,14 +166,6 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 contig_wqebbs_room, pi;
-
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room <
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
@@ -278,7 +273,9 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
int fsz;
u16 pi;
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -343,11 +340,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wq_cyc *wq = &sq->wq;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- u16 contig_wqebbs_room, pi;
- u8 num_wqebbs;
int i = 0;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
@@ -376,13 +370,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
return MLX5E_KTLS_SYNC_DONE;
}
- num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-
- if (unlikely(contig_wqebbs_room < num_wqebbs))
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
-
for (; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
@@ -422,34 +409,18 @@ err_out:
return MLX5E_KTLS_SYNC_FAIL;
}
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, int datalen,
+ struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wqe_ctrl_seg *cseg;
- struct tls_context *tls_ctx;
- int datalen;
u32 seq;
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
-
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
- if (!datalen)
- goto out;
-
- tls_ctx = tls_get_ctx(skb->sk);
- if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
- goto err_out;
-
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
stats->tls_ctx++;
}
@@ -460,30 +431,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
- default: /* MLX5E_KTLS_SYNC_FAIL */
+ case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
}
priv_tx->expected_seq = seq + datalen;
- cseg = &(*wqe)->ctrl;
- cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
+ state->tls_tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
out:
- return skb;
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index fba561ffe1d4..1fbb5a90cb38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -167,7 +167,7 @@ static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
- u64 rcd_sn = *(u64 *)rcd_sn_data;
+ __be64 rcd_sn = *(__be64 *)rcd_sn_data;
if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
return -EINVAL;
@@ -240,3 +240,17 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
kfree(tls);
priv->tls = NULL;
}
+
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+
+ if (!mlx5_accel_is_tls_device(mdev))
+ return 0;
+
+ if (MLX5_CAP_GEN(mdev, tls_tx))
+ return mlx5e_ktls_get_stop_room(sq);
+
+ /* Resync SKB. */
+ return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+}
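
Note: the per-offload stop-room contributions are summed into sq->stop_room (see mlx5e_calc_sq_stop_room() in en_main.c below) and consumed as a single availability check at xmit time, following the same pattern the XDP MPWQE check above now uses. A sketch:

static bool example_sq_has_room(struct mlx5e_txqsq *sq)
{
	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room);
}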
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index 9015f3f7792d..9219bdb2786e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -94,6 +94,8 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
+
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
@@ -108,6 +110,11 @@ static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+ return 0;
+}
+
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index ef1ed15a53b4..05454a843b28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,18 +184,17 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
nskb->queue_mapping = skb->queue_mapping;
}
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
- struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi,
- struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
+ u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -217,7 +216,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
- return skb;
+ return true;
atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
goto err_out;
@@ -247,20 +246,19 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
- return skb;
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
+ return true;
err_out:
dev_kfree_skb_any(skb);
- return NULL;
+ return false;
}
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
@@ -269,41 +267,45 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
- skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
- goto out;
- }
-
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ return true;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
- goto out;
+ return true;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
- goto out;
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+ goto err_out;
+
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
- if (unlikely(expected_seq != skb_seq)) {
- skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
- goto out;
- }
+ if (unlikely(expected_seq != skb_seq))
+ return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
dev_kfree_skb_any(skb);
- skb = NULL;
- goto out;
+ return false;
}
context->expected_seq = skb_seq + datalen;
-out:
- return skb;
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state)
+{
+ cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
}
static int tls_update_resync_sn(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 90bc1f2384c8..a50d0394df0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,11 +40,14 @@
#include "en.h"
#include "en/txrx.h"
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi);
+struct mlx5e_accel_tx_tls_state {
+ u32 tls_tisn;
+};
+
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct mlx5e_accel_tx_tls_state *state);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f7890e0ce96c..1e42c7ae621b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -36,12 +36,11 @@
* Global resources are common to all the netdevices created on the same NIC.
*/
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
- struct mlx5e_tir *tir, u32 *in, int inlen)
+int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in)
{
int err;
- err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ err = mlx5_core_create_tir(mdev, in, &tir->tirn);
if (err)
return err;
@@ -142,10 +141,12 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
memset(res, 0, sizeof(*res));
}
-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
+ bool enable_mc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
+ u8 lb_flags = 0;
int err = 0;
u32 tirn = 0;
int inlen;
@@ -159,15 +160,20 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
}
if (enable_uc_lb)
- MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
+ lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (enable_mc_lb)
+ lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (lb_flags)
+ MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags);
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+ err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 47874d34156b..bc102d094bbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -35,6 +35,8 @@
#include "en/port.h"
#include "en/port_buffer.h"
+#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
+
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
@@ -49,6 +51,12 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
+enum {
+ MLX5_DCB_CHG_RESET,
+ MLX5_DCB_NO_CHG,
+ MLX5_DCB_CHG_NO_RESET,
+};
+
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
@@ -238,7 +246,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
* Report both group #0 and #1 as ETS type.
* All the tcs in group #0 will be reported with 0% BW.
*/
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
+static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
@@ -977,7 +985,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
return err;
}
-const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
+static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
@@ -1009,6 +1017,24 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.setpfcstate = mlx5e_dcbnl_setpfcstate,
};
+void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+}
+
+void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+}
+
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode *mode)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bc290ae80a53..3ef2525e8de9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -432,7 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
*cur_params = new_channels.params;
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
goto out;
}
@@ -1219,7 +1219,7 @@ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
}
if (hash_changed)
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3bc2ac3d53fc..83c9b2bbc4af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -858,7 +858,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
goto out;
priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ mlx5e_modify_tirs_hash(priv, in);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bd8d0e096085..a836a02a2116 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -38,7 +38,7 @@
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
@@ -66,7 +66,6 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
-
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -233,7 +232,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->mkey_be;
+ cseg->umr_mkey = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
@@ -374,7 +373,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
- u32 num_xsk_frames = 0;
u32 rq_xdp_ix;
u32 pool_size;
int wq_sz;
@@ -414,7 +412,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
- rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
pool_size = 1 << params->log_rq_mtu_frames;
switch (rq->wq_type) {
@@ -428,10 +425,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- if (xsk)
- num_xsk_frames = wq_sz <<
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
-
pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
mlx5e_mpwqe_get_log_rq_size(params, xsk);
@@ -462,6 +455,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
+
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_wq_destroy;
@@ -481,10 +476,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
- if (xsk)
- num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
-
rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+
rq->wqe.frags =
kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
@@ -522,17 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
if (xsk) {
- err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
- if (unlikely(err)) {
- mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
- num_xsk_frames);
- goto err_free;
- }
-
- rq->zca.free = mlx5e_xsk_zca_free;
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_ZERO_COPY,
- &rq->zca);
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
@@ -721,7 +707,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -752,7 +738,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
MLX5_SET(rqc, rqc, scatter_fcs, enable);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -781,7 +767,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
- err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
@@ -1027,17 +1013,17 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kvfree(sq->db.ico_wqe);
+ kvfree(sq->db.wqe_info);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
- sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
- sizeof(*sq->db.ico_wqe)),
- GFP_KERNEL, numa);
- if (!sq->db.ico_wqe)
+ size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
+ if (!sq->db.wqe_info)
return -ENOMEM;
return 0;
@@ -1116,6 +1102,22 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
+static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
+{
+ int sq_size = 1 << log_sq_size;
+
+ sq->stop_room = mlx5e_tls_get_stop_room(sq);
+ sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+ if (WARN_ON(sq->stop_room >= sq_size)) {
+ netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
+ sq->stop_room, sq_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
@@ -1140,20 +1142,16 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
- sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
-#ifdef CONFIG_MLX5_EN_TLS
- if (mlx5_accel_is_tls_device(c->priv->mdev)) {
+ if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
- sq->stop_room += MLX5E_SQ_TLS_ROOM +
- mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
- TLS_MAX_PAYLOAD_SIZE);
- }
-#endif
+ err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
+ if (err)
+ return err;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1259,7 +1257,7 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
- err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
+ err = mlx5_core_modify_sq(mdev, sqn, in);
kvfree(in);
@@ -1364,13 +1362,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- wi = &sq->db.wqe_info[pi];
+ sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
@@ -1482,20 +1479,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
/* Pre initialize fixed WQE fields */
for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = 1,
+ .num_pkts = 1,
+ };
+
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
dseg->lkey = sq->mkey_be;
-
- wi->num_wqebbs = 1;
- wi->num_pkts = 1;
}
}
@@ -2698,7 +2696,7 @@ static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
ttconfig->rx_hash_fields = rx_hash_fields;
}
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
{
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -2714,7 +2712,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
}
/* Verify inner tirs resources allocated */
@@ -2726,8 +2724,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_update_rx_hash_fields(&ttconfig, tt,
rss->rx_hash_fields[tt]);
mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
- inlen);
+ mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
}
}
@@ -2753,15 +2750,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
- inlen);
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
if (err)
goto free_in;
}
for (ix = 0; ix < priv->max_nch; ix++) {
- err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
- in, inlen);
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
if (err)
goto free_in;
}
@@ -2840,11 +2835,8 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
ETH_MAX_MTU);
}
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- int nch = priv->channels.params.num_channels;
- int ntc = priv->channels.params.num_tc;
int tc;
netdev_reset_tc(netdev);
@@ -2861,15 +2853,47 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
- int num_txqs = count * priv->channels.params.num_tc;
- int num_rxqs = count * priv->profile->rq_groups;
struct net_device *netdev = priv->netdev;
+ int num_txqs, num_rxqs, nch, ntc;
+ int old_num_txqs, old_ntc;
+ int err;
+
+ old_num_txqs = netdev->real_num_tx_queues;
+ old_ntc = netdev->num_tc;
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc;
+ num_rxqs = nch * priv->profile->rq_groups;
+
+ mlx5e_netdev_set_tcs(netdev, nch, ntc);
- mlx5e_netdev_set_tcs(netdev);
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, num_rxqs);
+ err = netif_set_real_num_tx_queues(netdev, num_txqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ goto err_tcs;
+ }
+ err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ if (err) {
+ netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
+ goto err_txqs;
+ }
+
+ return 0;
+
+err_txqs:
+ /* netif_set_real_num_rx_queues can fail only when nch increases. Only
+ * one of nch and ntc is changed in this function. That means the call
+ * to netif_set_real_num_tx_queues below should not fail, because it
+ * decreases the number of TX queues.
+ */
+ WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
+
+err_tcs:
+ mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+ return err;
}
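The unwind order above mirrors the apply order: TX queues change first, and a failure in the RX step restores the old TX count, which (per the comment) only shrinks the queue set and therefore should not fail. The same pattern as a standalone sketch, with hypothetical names:

static int example_resize_queues(struct net_device *dev,
				 int txq, int rxq, int old_txq)
{
	int err = netif_set_real_num_tx_queues(dev, txq);

	if (err)
		return err;

	err = netif_set_real_num_rx_queues(dev, rxq);
	if (err) /* shrinking TX back is expected to succeed */
		WARN_ON_ONCE(netif_set_real_num_tx_queues(dev, old_txq));

	return err;
}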
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
@@ -2896,8 +2920,12 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ int err;
+
+ err = mlx5e_update_netdev_queues(priv);
+ if (err)
+ return err;
- mlx5e_update_netdev_queues(priv, count);
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
if (!netif_is_rxfh_configured(priv->netdev))
@@ -3215,7 +3243,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
- return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
+ return mlx5_core_create_tis(mdev, in, tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
@@ -3333,7 +3361,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tt, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3348,7 +3376,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
tir = &priv->inner_indir_tir[i];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (err) {
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
goto err_destroy_inner_tirs;
@@ -3391,7 +3419,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
- err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ err = mlx5e_create_tir(priv->mdev, tir, in);
if (unlikely(err))
goto err_destroy_ch_tirs;
}
@@ -3494,41 +3522,6 @@ out:
return err;
}
-#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct flow_cls_offload *cls_flower,
- unsigned long flags)
-{
- switch (cls_flower->command) {
- case FLOW_CLS_REPLACE:
- return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_DESTROY:
- return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_STATS:
- return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-#endif
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3537,7 +3530,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct mlx5e_priv *priv = netdev_priv(dev);
switch (type) {
-#ifdef CONFIG_MLX5_ESWITCH
case TC_SETUP_BLOCK: {
struct flow_block_offload *f = type_data;
@@ -3547,7 +3539,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
mlx5e_setup_tc_block_cb,
priv, priv, true);
}
-#endif
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
default:
@@ -3720,7 +3711,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3831,7 +3822,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
@@ -4889,10 +4880,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
+ mlx5e_dcbnl_build_netdev(netdev);
netdev->watchdog_timeo = 15 * HZ;
@@ -5012,29 +5000,40 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
- priv->q_counter = 0;
- }
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
- err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
- if (err) {
- mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
- priv->drop_rq_q_counter = 0;
- }
+ err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+ if (!err)
+ priv->drop_rq_q_counter =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
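These hunks are part of a series-wide conversion of the command interface: instead of passing explicit buffer lengths to mlx5_cmd_exec(), callers use mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), which derive both sizes from the interface command name. The same convention is what drops the inlen/outlen arguments from mlx5_core_modify_sq(), mlx5_core_modify_tir() and mlx5_core_create_tis() earlier in this file. The wrapper presumably reduces to something like (simplified sketch, not the exact definition):

#define example_cmd_exec_inout(dev, ifc_cmd, in, out)                  \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),         \
		      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))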
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (priv->q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
- if (priv->drop_rq_q_counter)
- mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ if (priv->q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
+
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ }
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -5169,9 +5168,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
-#endif
return 0;
}
@@ -5198,9 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
-#endif
queue_work(priv->wq, &priv->set_rx_mode_work);
@@ -5215,10 +5210,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->netdev->reg_state == NETREG_REGISTERED)
mlx5e_dcbnl_delete_app(priv);
-#endif
rtnl_lock();
if (netif_running(priv->netdev))
@@ -5238,7 +5231,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
- return mlx5e_refresh_tirs(priv, false);
+ return mlx5e_refresh_tirs(priv, false, false);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -5373,9 +5366,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
*/
if (take_rtnl)
rtnl_lock();
- mlx5e_num_channels_changed(priv);
+ err = mlx5e_num_channels_changed(priv);
if (take_rtnl)
rtnl_unlock();
+ if (err)
+ goto out;
err = profile->init_tx(priv);
if (err)
@@ -5513,9 +5508,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
mlx5e_devlink_port_type_eth_set(priv);
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
-#endif
return priv;
err_devlink_port_unregister:
@@ -5538,9 +5531,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
}
#endif
priv = vpriv;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
-#endif
unregister_netdev(priv->netdev);
mlx5e_devlink_port_unregister(priv);
mlx5e_detach(mdev, vpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4a8e0dfdc5f2..006807e04eda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -35,7 +35,6 @@
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
-#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>
@@ -45,9 +44,9 @@
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
-#include "en/tc_tun.h"
+#include "en/rep/tc.h"
+#include "en/rep/neigh.h"
#include "fs_core.h"
-#include "lib/port_tun.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
@@ -58,16 +57,6 @@
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
-struct mlx5e_rep_indr_block_priv {
- struct net_device *netdev;
- struct mlx5e_rep_priv *rpriv;
-
- struct list_head list;
-};
-
-static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev);
-
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -485,706 +474,6 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
-static unsigned long mlx5e_rep_ipv6_interval(void)
-{
- if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
- return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
-
- return ~0UL;
-}
-
-static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
-{
- unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
- unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
- mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
-}
-
-void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-
- mlx5_fc_queue_stats_work(priv->mdev,
- &neigh_update->neigh_stats_work,
- neigh_update->min_interval);
-}
-
-static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
-{
- return refcount_inc_not_zero(&nhe->refcnt);
-}
-
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
-
-static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
-{
- if (refcount_dec_and_test(&nhe->refcnt)) {
- mlx5e_rep_neigh_entry_remove(nhe);
- kfree_rcu(nhe, rcu);
- }
-}
-
-static struct mlx5e_neigh_hash_entry *
-mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_neigh_hash_entry *next = NULL;
-
- rcu_read_lock();
-
- for (next = nhe ?
- list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
- &nhe->neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list) :
- list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list);
- next;
- next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
- &next->neigh_list,
- struct mlx5e_neigh_hash_entry,
- neigh_list))
- if (mlx5e_rep_neigh_entry_hold(next))
- break;
-
- rcu_read_unlock();
-
- if (nhe)
- mlx5e_rep_neigh_entry_release(nhe);
-
- return next;
-}
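Note on the iterator being removed here (it moves with the rest of this block to the new rep neigh code): it is an RCU-list cursor that takes a reference on the next live entry, skipping entries whose refcount already dropped to zero, and releases the entry passed in. The resulting usage contract is a simple loop that stays safe against concurrent removal:

	struct mlx5e_neigh_hash_entry *nhe = NULL;

	/* each call releases the previous entry and holds the next one */
	while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
		mlx5e_tc_update_neigh_used_value(nhe);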
-
-static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
-{
- struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
- neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe = NULL;
-
- rtnl_lock();
- if (!list_empty(&rpriv->neigh_update.neigh_list))
- mlx5e_rep_queue_neigh_stats_work(priv);
-
- while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
- mlx5e_tc_update_neigh_used_value(nhe);
-
- rtnl_unlock();
-}
-
-static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- bool neigh_connected,
- unsigned char ha[ETH_ALEN])
-{
- struct ethhdr *eth = (struct ethhdr *)e->encap_header;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- bool encap_connected;
- LIST_HEAD(flow_list);
-
- ASSERT_RTNL();
-
- /* wait for encap to be fully initialized */
- wait_for_completion(&e->res_ready);
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
- goto unlock;
-
- mlx5e_take_all_encap_flows(e, &flow_list);
-
- if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
- (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
- mlx5e_tc_encap_flows_del(priv, e, &flow_list);
-
- if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
- ether_addr_copy(e->h_dest, ha);
- ether_addr_copy(eth->h_dest, ha);
- /* Update the encap source MAC, in case we delete
- * the flows when the encap source MAC changed.
- */
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
-
- mlx5e_tc_encap_flows_add(priv, e, &flow_list);
- }
-unlock:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- mlx5e_put_encap_flow_list(priv, &flow_list);
-}
-
-static void mlx5e_rep_neigh_update(struct work_struct *work)
-{
- struct mlx5e_neigh_hash_entry *nhe =
- container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
- struct neighbour *n = nhe->n;
- struct mlx5e_encap_entry *e;
- unsigned char ha[ETH_ALEN];
- struct mlx5e_priv *priv;
- bool neigh_connected;
- u8 nud_state, dead;
-
- rtnl_lock();
-
- /* If these parameters are changed after we release the lock,
- * we'll receive another event letting us know about it.
- * We use this lock to avoid inconsistency between the neigh validity
- * and its hw address.
- */
- read_lock_bh(&n->lock);
- memcpy(ha, n->ha, ETH_ALEN);
- nud_state = n->nud_state;
- dead = n->dead;
- read_unlock_bh(&n->lock);
-
- neigh_connected = (nud_state & NUD_VALID) && !dead;
-
- trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
-
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- if (!mlx5e_encap_take(e))
- continue;
-
- priv = netdev_priv(e->out_dev);
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
- mlx5e_encap_put(priv, e);
- }
- mlx5e_rep_neigh_entry_release(nhe);
- rtnl_unlock();
- neigh_release(n);
-}
-
-static struct mlx5e_rep_indr_block_priv *
-mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- struct mlx5e_rep_indr_block_priv *cb_priv;
-
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
- list_for_each_entry(cb_priv,
- &rpriv->uplink_priv.tc_indr_block_priv_list,
- list)
- if (cb_priv->netdev == netdev)
- return cb_priv;
-
- return NULL;
-}
-
-static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
- struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
-
- list_for_each_entry_safe(cb_priv, temp, head, list) {
- mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
- kfree(cb_priv);
- }
-}
-
-static int
-mlx5e_rep_indr_offload(struct net_device *netdev,
- struct flow_cls_offload *flower,
- struct mlx5e_rep_indr_block_priv *indr_priv,
- unsigned long flags)
-{
- struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
- int err = 0;
-
- switch (flower->command) {
- case FLOW_CLS_REPLACE:
- err = mlx5e_configure_flower(netdev, priv, flower, flags);
- break;
- case FLOW_CLS_DESTROY:
- err = mlx5e_delete_flower(netdev, priv, flower, flags);
- break;
- case FLOW_CLS_STATS:
- err = mlx5e_stats_flower(netdev, priv, flower, flags);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
- return err;
-}
-
-static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
- struct mlx5e_rep_indr_block_priv *priv = indr_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
-{
- struct mlx5e_rep_indr_block_priv *priv = indr_priv;
- struct flow_cls_offload *f = type_data;
- struct flow_cls_offload tmp;
- struct mlx5e_priv *mpriv;
- struct mlx5_eswitch *esw;
- unsigned long flags;
- int err;
-
- mpriv = netdev_priv(priv->rpriv->netdev);
- esw = mpriv->mdev->priv.eswitch;
-
- flags = MLX5_TC_FLAG(EGRESS) |
- MLX5_TC_FLAG(ESW_OFFLOAD) |
- MLX5_TC_FLAG(FT_OFFLOAD);
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- memcpy(&tmp, f, sizeof(*f));
-
- /* Re-use tc offload path by moving the ft flow to the
- * reserved ft chain.
- *
- * FT offload can use prio range [0, INT_MAX], so we normalize
- * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
- * as with tc, where prio 0 isn't supported.
- *
- * We only support chain 0 of FT offload.
- */
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
- tmp.common.chain_index)
- return -EOPNOTSUPP;
-
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
- tmp.common.prio++;
- err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
- memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
- return err;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mlx5e_rep_indr_block_unbind(void *cb_priv)
-{
- struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
-
- list_del(&indr_priv->list);
- kfree(indr_priv);
-}
-
-static LIST_HEAD(mlx5e_block_cb_list);
-
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev,
- struct mlx5e_rep_priv *rpriv,
- struct flow_block_offload *f,
- flow_setup_cb_t *setup_cb)
-{
- struct mlx5e_rep_indr_block_priv *indr_priv;
- struct flow_block_cb *block_cb;
-
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- f->unlocked_driver_cb = true;
- f->driver_block_list = &mlx5e_block_cb_list;
-
- switch (f->command) {
- case FLOW_BLOCK_BIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (indr_priv)
- return -EEXIST;
-
- indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
- if (!indr_priv)
- return -ENOMEM;
-
- indr_priv->netdev = netdev;
- indr_priv->rpriv = rpriv;
- list_add(&indr_priv->list,
- &rpriv->uplink_priv.tc_indr_block_priv_list);
-
- block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
- mlx5e_rep_indr_block_unbind);
- if (IS_ERR(block_cb)) {
- list_del(&indr_priv->list);
- kfree(indr_priv);
- return PTR_ERR(block_cb);
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
-
- return 0;
- case FLOW_BLOCK_UNBIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
- if (!indr_priv)
- return -ENOENT;
-
- block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static
-int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
-{
- switch (type) {
- case TC_SETUP_BLOCK:
- return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_tc_cb);
- case TC_SETUP_FT:
- return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_ft_cb);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- int err;
-
- err = __flow_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_cb,
- rpriv);
- if (err) {
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
-
- mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
- netdev_name(netdev), err);
- }
- return err;
-}
-
-static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
-{
- __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
- rpriv);
-}
-
-static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
- uplink_priv.netdevice_nb);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-
- if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
- return NOTIFY_OK;
-
- switch (event) {
- case NETDEV_REGISTER:
- mlx5e_rep_indr_register_block(rpriv, netdev);
- break;
- case NETDEV_UNREGISTER:
- mlx5e_rep_indr_unregister_block(rpriv, netdev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static void
-mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe,
- struct neighbour *n)
-{
- /* Take a reference to ensure the neighbour and mlx5 encap
- * entry won't be destructed until we drop the reference in
- * delayed work.
- */
- neigh_hold(n);
-
- /* This assignment is valid as long as the neigh reference
- * is taken
- */
- nhe->n = n;
-
- if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
- mlx5e_rep_neigh_entry_release(nhe);
- neigh_release(n);
- }
-}
-
-static struct mlx5e_neigh_hash_entry *
-mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
- struct mlx5e_neigh *m_neigh);
-
-static int mlx5e_rep_netevent_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
- neigh_update.netevent_nb);
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe = NULL;
- struct mlx5e_neigh m_neigh = {};
- struct neigh_parms *p;
- struct neighbour *n;
- bool found = false;
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- n = ptr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
-#else
- if (n->tbl != &arp_tbl)
-#endif
- return NOTIFY_DONE;
-
- m_neigh.dev = n->dev;
- m_neigh.family = n->ops->family;
- memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
-
- rcu_read_lock();
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
- rcu_read_unlock();
- if (!nhe)
- return NOTIFY_DONE;
-
- mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
- break;
-
- case NETEVENT_DELAY_PROBE_TIME_UPDATE:
- p = ptr;
-
- /* We check the device is present since we don't care about
- * changes in the default table, we only care about changes
- * made to the per-device delay probe time parameter.
- */
-#if IS_ENABLED(CONFIG_IPV6)
- if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
-#else
- if (!p->dev || p->tbl != &arp_tbl)
-#endif
- return NOTIFY_DONE;
-
- rcu_read_lock();
- list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
- neigh_list) {
- if (p->dev == nhe->m_neigh.dev) {
- found = true;
- break;
- }
- }
- rcu_read_unlock();
- if (!found)
- return NOTIFY_DONE;
-
- neigh_update->min_interval = min_t(unsigned long,
- NEIGH_VAR(p, DELAY_PROBE_TIME),
- neigh_update->min_interval);
- mlx5_fc_update_sampling_interval(priv->mdev,
- neigh_update->min_interval);
- break;
- }
- return NOTIFY_DONE;
-}
-
-static const struct rhashtable_params mlx5e_neigh_ht_params = {
- .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
- .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
- .key_len = sizeof(struct mlx5e_neigh),
- .automatic_shrinking = true,
-};
-
-static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- int err;
-
- err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&neigh_update->neigh_list);
- mutex_init(&neigh_update->encap_lock);
- INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
- mlx5e_rep_neigh_stats_work);
- mlx5e_rep_neigh_update_init_interval(rpriv);
-
- rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
- err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
- if (err)
- goto out_err;
- return 0;
-
-out_err:
- rhashtable_destroy(&neigh_update->neigh_ht);
- return err;
-}
-
-static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
-{
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
-
- unregister_netevent_notifier(&neigh_update->netevent_nb);
-
- flush_workqueue(priv->wq); /* flush neigh update works */
-
- cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
-
- mutex_destroy(&neigh_update->encap_lock);
- rhashtable_destroy(&neigh_update->neigh_ht);
-}
-
-static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- int err;
-
- err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
- &nhe->rhash_node,
- mlx5e_neigh_ht_params);
- if (err)
- return err;
-
- list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
-
- return err;
-}
-
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
-
- mutex_lock(&rpriv->neigh_update.encap_lock);
-
- list_del_rcu(&nhe->neigh_list);
-
- rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
- &nhe->rhash_node,
- mlx5e_neigh_ht_params);
- mutex_unlock(&rpriv->neigh_update.encap_lock);
-}
-
-/* This function must only be called under the representor's encap_lock or
- * inside an RCU read-side critical section.
- */
-static struct mlx5e_neigh_hash_entry *
-mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
- struct mlx5e_neigh *m_neigh)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_neigh_hash_entry *nhe;
-
- nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
- mlx5e_neigh_ht_params);
- return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
-}
-
-static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct mlx5e_neigh_hash_entry **nhe)
-{
- int err;
-
- *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
- if (!*nhe)
- return -ENOMEM;
-
- (*nhe)->priv = priv;
- memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
- INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
- spin_lock_init(&(*nhe)->encap_list_lock);
- INIT_LIST_HEAD(&(*nhe)->encap_list);
- refcount_set(&(*nhe)->refcnt, 1);
-
- err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
- if (err)
- goto out_free;
- return 0;
-
-out_free:
- kfree(*nhe);
- return err;
-}
-
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
- struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
- struct mlx5e_neigh_hash_entry *nhe;
- int err;
-
- err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
- if (err)
- return err;
-
- mutex_lock(&rpriv->neigh_update.encap_lock);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
- if (!nhe) {
- err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
- if (err) {
- mutex_unlock(&rpriv->neigh_update.encap_lock);
- mlx5_tun_entropy_refcount_dec(tun_entropy,
- e->reformat_type);
- return err;
- }
- }
-
- e->nhe = nhe;
- spin_lock(&nhe->encap_list_lock);
- list_add_rcu(&e->encap_list, &nhe->encap_list);
- spin_unlock(&nhe->encap_list_lock);
-
- mutex_unlock(&rpriv->neigh_update.encap_lock);
-
- return 0;
-}
-
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
- struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
-
- if (!e->nhe)
- return;
-
- spin_lock(&e->nhe->encap_list_lock);
- list_del_rcu(&e->encap_list);
- spin_unlock(&e->nhe->encap_list_lock);
-
- mlx5e_rep_neigh_entry_release(e->nhe);
- e->nhe = NULL;
- mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
-}
-
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1225,129 +514,6 @@ static int mlx5e_rep_close(struct net_device *dev)
return ret;
}
-static int
-mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
- struct flow_cls_offload *cls_flower, int flags)
-{
- switch (cls_flower->command) {
- case FLOW_CLS_REPLACE:
- return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_DESTROY:
- return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
- flags);
- case FLOW_CLS_STATS:
- return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
- flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static
-int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma)
-{
- switch (ma->command) {
- case TC_CLSMATCHALL_REPLACE:
- return mlx5e_tc_configure_matchall(priv, ma);
- case TC_CLSMATCHALL_DESTROY:
- return mlx5e_tc_delete_matchall(priv, ma);
- case TC_CLSMATCHALL_STATS:
- mlx5e_tc_stats_matchall(priv, ma);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
- case TC_SETUP_CLSMATCHALL:
- return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct flow_cls_offload tmp, *f = type_data;
- struct mlx5e_priv *priv = cb_priv;
- struct mlx5_eswitch *esw;
- unsigned long flags;
- int err;
-
- flags = MLX5_TC_FLAG(INGRESS) |
- MLX5_TC_FLAG(ESW_OFFLOAD) |
- MLX5_TC_FLAG(FT_OFFLOAD);
- esw = priv->mdev->priv.eswitch;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- memcpy(&tmp, f, sizeof(*f));
-
- if (!mlx5_esw_chains_prios_supported(esw))
- return -EOPNOTSUPP;
-
- /* Re-use tc offload path by moving the ft flow to the
- * reserved ft chain.
- *
- * FT offload can use prio range [0, INT_MAX], so we normalize
- * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
- * as with tc, where prio 0 isn't supported.
- *
- * We only support chain 0 of FT offload.
- */
- if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
- return -EOPNOTSUPP;
- if (tmp.common.chain_index != 0)
- return -EOPNOTSUPP;
-
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
- tmp.common.prio++;
- err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
- memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
- return err;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
-static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
-static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct flow_block_offload *f = type_data;
-
- f->unlocked_driver_cb = true;
-
- switch (type) {
- case TC_SETUP_BLOCK:
- return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_tc_cb_list,
- mlx5e_rep_setup_tc_cb,
- priv, priv, true);
- case TC_SETUP_FT:
- return flow_block_cb_setup_simple(type_data,
- &mlx5e_rep_block_ft_cb_list,
- mlx5e_rep_setup_ft_cb,
- priv, priv, true);
- default:
- return -EOPNOTSUPP;
- }
-}
-
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1540,10 +706,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
/* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (MLX5_CAP_GEN(mdev, qos))
- netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
+ mlx5e_dcbnl_build_rep_netdev(netdev);
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
eth_hw_addr_random(netdev);
@@ -1691,6 +854,24 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
return 0;
}
+static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (!rpriv->vport_rx_rule)
+ return;
+
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ rpriv->vport_rx_rule = NULL;
+}
+
+int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
+{
+ rep_vport_rx_rule_destroy(priv);
+
+ return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
+}
+
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1755,9 +936,7 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
@@ -1790,31 +969,25 @@ static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
uplink_priv = &rpriv->uplink_priv;
- mutex_init(&uplink_priv->unready_flows_lock);
- INIT_LIST_HEAD(&uplink_priv->unready_flows);
-
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_rep_tc_init(rpriv);
if (err)
return err;
mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
- /* init indirect block notifications */
- INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
- uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
- err = register_netdevice_notifier_dev_net(rpriv->netdev,
- &uplink_priv->netdevice_nb,
- &uplink_priv->netdevice_nn);
+ mlx5e_rep_bond_init(rpriv);
+ err = mlx5e_rep_tc_netdevice_event_register(rpriv);
if (err) {
- mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
- goto tc_esw_cleanup;
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
+ err);
+ goto err_event_reg;
}
return 0;
-tc_esw_cleanup:
- mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+err_event_reg:
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
return err;
}
@@ -1844,17 +1017,9 @@ destroy_tises:
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
- struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
-
- /* clean indirect TC block notifications */
- unregister_netdevice_notifier_dev_net(rpriv->netdev,
- &uplink_priv->netdevice_nb,
- &uplink_priv->netdevice_nn);
- mlx5e_rep_indr_clean_block_privs(rpriv);
-
- /* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
- mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
+ mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
@@ -1896,13 +1061,8 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
return NOTIFY_OK;
}
- if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
- queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
-
- return NOTIFY_OK;
- }
+ if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
+ return mlx5e_rep_tc_event_port_affinity(priv);
return NOTIFY_DONE;
}
@@ -1911,7 +1071,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
u16 max_mtu;
netdev->min_mtu = ETH_MIN_MTU;
@@ -1919,28 +1078,22 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
mlx5e_set_dev_port_mtu(priv);
- INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
- mlx5e_tc_reoffload_flows_work);
+ mlx5e_rep_tc_enable(priv);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
-#endif
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
-#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
- cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
+ mlx5e_rep_tc_disable(priv);
mlx5_lag_remove(mdev);
}
@@ -2047,26 +1200,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
return 0;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
- if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (rep->vport == MLX5_VPORT_UPLINK)
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
pfnum, false, 0,
&ppid.id[0], ppid.id_len);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- } else if (rep->vport == MLX5_VPORT_PF) {
+ else if (rep->vport == MLX5_VPORT_PF)
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum);
- dl_port_index = rep->vport;
- } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
- rpriv->rep->vport)) {
+ else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
pfnum, rep->vport - 1);
- dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
- }
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 612b5cf0673d..1d5669801484 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,6 +56,7 @@ struct mlx5e_neigh_update_table {
};
struct mlx5_tc_ct_priv;
+struct mlx5e_rep_bond;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -68,13 +69,8 @@ struct mlx5_rep_uplink_priv {
* tc_indr_block_cb_priv_list is used to lookup indirect callback
* private data
*
- * netdevice_nb is the netdev events notifier - used to register
- * tunnel devices for block events
- *
*/
struct list_head tc_indr_block_priv_list;
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
@@ -89,6 +85,9 @@ struct mlx5_rep_uplink_priv {
struct mapping_ctx *tunnel_enc_opts_mapping;
struct mlx5_tc_ct_priv *ct_priv;
+
+ /* support eswitch vports bonding */
+ struct mlx5e_rep_bond *bond;
};
struct mlx5e_rep_priv {
@@ -158,6 +157,22 @@ struct mlx5e_neigh_hash_entry {
enum {
/* set when the encap entry is successfully offloaded into HW */
MLX5_ENCAP_ENTRY_VALID = BIT(0),
+ MLX5_REFORMAT_DECAP = BIT(1),
+};
+
+struct mlx5e_decap_key {
+ struct ethhdr key;
+};
+
+struct mlx5e_decap_entry {
+ struct mlx5e_decap_key key;
+ struct list_head flows;
+ struct hlist_node hlist;
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct rcu_head rcu;
};
struct mlx5e_encap_entry {
@@ -195,6 +210,15 @@ struct mlx5e_rep_sq {
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
+int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv);
+void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv);
+int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
+ struct net_device *lag_dev);
+void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
+ const struct net_device *netdev,
+ const struct net_device *lag_dev);
+int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup);
+
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
@@ -203,11 +227,6 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe);
-int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
-void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
-
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e2beb89c1832..dbb1c6323967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,6 +42,7 @@
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
+#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
@@ -300,7 +301,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
*/
- mlx5e_xsk_page_release(rq, dma_info);
+ xsk_buff_free(dma_info->xsk);
else
mlx5e_page_release_dynamic(rq, dma_info, recycle);
}
@@ -385,7 +386,11 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
if (rq->umem) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
- if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
+ /* Check in advance that we have enough frames, instead of
+ * allocating one-by-one, failing and moving frames to the
+ * Reuse Ring.
+ */
+ if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
return -ENOMEM;
}
@@ -468,22 +473,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
-static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
- struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- wi->opcode = MLX5_OPCODE_NOP;
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
-}
-
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -492,23 +481,20 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, contig_wqebbs_room;
+ u16 pi;
int err;
int i;
+ /* Check in advance that we have enough frames, instead of allocating
+ * one-by-one, failing and moving frames to the Reuse Ring.
+ */
if (rq->umem &&
- unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
+ pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -527,9 +513,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
- sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
- sq->db.ico_wqe[pi].umr.rq = rq;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .umr.rq = rq,
+ };
+
sq->pc += MLX5E_UMR_WQEBBS;
sq->doorbell_cseg = &umr_wqe->ctrl;
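mlx5e_icosq_get_next_pi() centralizes the frag-edge handling that mlx5e_fill_icosq_frag_edge() used to do above: when the requested WQE would not fit contiguously before the ring wraps, the remainder is padded with NOPs and the post-wrap producer index is returned. A sketch of the presumed helper, reconstructed from the removed logic and the new wqe_info layout (not quoted from the patch):

static u16 example_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	u16 contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_icosq_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;

		/* pad the frag edge with NOPs so the WQE never wraps */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_icosq_wqe_info) {
				.wqe_type = MLX5E_ICOSQ_WQE_NOP,
				.num_wqebbs = 1,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}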
@@ -618,33 +607,38 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_icosq_wqe_info *wi;
u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.ico_wqe[ci];
+ wi = &sq->db.wqe_info[ci];
sqcc += wi->num_wqebbs;
if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+ (struct mlx5_err_cqe *)cqe);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
- if (likely(wi->opcode == MLX5_OPCODE_UMR))
+ switch (wi->wqe_type) {
+ case MLX5E_ICOSQ_WQE_UMR_RX:
wi->umr.rq->mpwqe.umr_completed++;
- else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
+ break;
+ case MLX5E_ICOSQ_WQE_NOP:
+ break;
+ default:
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- wi->opcode);
-
+ "Bad WQE type in ICOSQ WQE info: 0x%x\n",
+ wi->wqe_type);
+ }
} while (!last_wqe);
-
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->cc = sqcc;
@@ -1058,12 +1052,24 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
+static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
+ u32 len, struct xdp_buff *xdp)
+{
+ xdp->data_hard_start = va;
+ xdp->data = va + headroom;
+ xdp_set_data_meta_invalid(xdp);
+ xdp->data_end = xdp->data + len;
+ xdp->rxq = &rq->xdp_rxq;
+ xdp->frame_sz = rq->buff.frame0_sz;
+}
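mlx5e_xdp_handle() now takes this caller-built xdp_buff, and the SKB build path re-derives the headroom afterwards because an XDP program may have moved xdp->data (for example via bpf_xdp_adjust_head()). The resulting pattern, as used in both linear RX paths below (sketch):

	mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
	if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
		return NULL; /* consumed by XDP_TX/XDP_REDIRECT/XDP_DROP */

	/* the program may have adjusted the data pointer */
	rx_headroom = xdp.data - xdp.data_hard_start;
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);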
+
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
bool consumed;
@@ -1079,11 +1085,14 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
if (unlikely(!skb))
return NULL;
@@ -1229,12 +1238,12 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto free_wqe;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
@@ -1285,12 +1294,12 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
goto mpwrq_cqe_out;
napi_gro_receive(rq->cq.napi, skb);
- mlx5_tc_rep_post_napi_receive(&tc_priv);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
@@ -1356,6 +1365,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u16 rx_headroom = rq->buff.headroom;
u32 cqe_bcnt32 = cqe_bcnt;
+ struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
@@ -1377,7 +1387,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prefetch(data);
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
+ consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
rcu_read_unlock();
if (consumed) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
@@ -1385,6 +1396,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL; /* page/packet was consumed by XDP */
}
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
if (unlikely(!skb))
return NULL;
@@ -1501,6 +1514,7 @@ out:
#ifdef CONFIG_MLX5_CORE_IPOIB
+#define MLX5_IB_GRH_SGID_OFFSET 8
#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE 16
@@ -1514,6 +1528,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
+ u32 flags_rqpn;
u32 qpn;
u8 *dgid;
u8 g;
@@ -1535,7 +1550,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix].rq;
- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
+ g = (flags_rqpn >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
skb->pkt_type = PACKET_HOST;
@@ -1544,9 +1560,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
else
skb->pkt_type = PACKET_MULTICAST;
- /* TODO: IB/ipoib: Allow mcast packets from other VFs
- * 68996a6e760e5c74654723eeb57bf65628ae87f4
+ /* Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
*/
+ if (g && (qpn == (flags_rqpn & 0xffffff)) &&
+ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
+ MLX5_GID_SIZE) == 0)) {
+ skb->dev = NULL;
+ return;
+ }
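The new check decodes cqe->flags_rqpn as the extraction above implies: the two bits taken as 'g' indicate a GRH is present, and the low 24 bits carry the sender's QPN. A replicated multicast echo is therefore a GRH-bearing packet whose sender QPN equals our own QPN and whose source GID (at MLX5_IB_GRH_SGID_OFFSET) matches the interface GID stored past the first 4 bytes of dev_addr; setting skb->dev to NULL tells the caller to drop it. Condensed (sketch):

	bool own_echo = g && qpn == (flags_rqpn & 0xffffff) &&
			!memcmp(netdev->dev_addr + 4,
				skb->data + MLX5_IB_GRH_SGID_OFFSET,
				MLX5_GID_SIZE);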
skb_pull(skb, MLX5_IB_GRH_BYTES);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index bbff8d8ded76..46790216ce86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -234,7 +234,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
return err;
}
- err = mlx5e_refresh_tirs(priv, true);
+ err = mlx5e_refresh_tirs(priv, true, false);
if (err)
goto out;
@@ -263,7 +263,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
- mlx5e_refresh_tirs(priv, false);
+ mlx5e_refresh_tirs(priv, false, false);
}
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 30b216d9284c..f009fe09e99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,8 +32,8 @@
#include "lib/mlx5.h"
#include "en.h"
-#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
+#include "en_accel/en_accel.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -411,18 +411,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ int ret;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+
+ if (priv->q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
- if (priv->q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
- sizeof(out)))
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
- if (priv->drop_rq_q_counter &&
- !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
- out, sizeof(out)))
- qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
- out_of_buffer);
+ if (priv->drop_rq_q_counter) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->drop_rq_q_counter);
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
+ if (!ret)
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
@@ -480,18 +491,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
- int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
return;
- MLX5_SET(query_vnic_env_in, in, opcode,
- MLX5_CMD_OP_QUERY_VNIC_ENV);
- MLX5_SET(query_vnic_env_in, in, op_mod, 0);
- MLX5_SET(query_vnic_env_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -566,15 +573,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
#define PPORT_802_3_OFF(c) \
@@ -1424,27 +1428,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
-static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec)
-{
- return mlx5e_ipsec_get_count(priv);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec)
-{
- return idx + mlx5e_ipsec_get_strings(priv,
- data + idx * ETH_GSTRING_LEN);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec)
-{
- return idx + mlx5e_ipsec_get_stats(priv, data + idx);
-}
-
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec)
-{
- mlx5e_ipsec_update_stats(priv);
-}
-
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
return mlx5e_tls_get_count(priv);
@@ -1714,7 +1697,6 @@ MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
-static MLX5E_DEFINE_STATS_GRP(ipsec, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
/* The stats groups order is opposite to the update_stats() order calls */
@@ -1731,7 +1713,10 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(pcie),
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
- &MLX5E_STATS_GRP(ipsec),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
&MLX5E_STATS_GRP(tls),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 092b39ffa32a..2b83ba990714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -390,5 +390,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 10f705761666..7fc84f58e28a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -31,6 +31,7 @@
*/
#include <net/flow_dissector.h>
+#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,10 +46,15 @@
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
+#include <net/bareudp.h>
+#include <net/bonding.h>
#include "en.h"
#include "en_rep.h"
+#include "en/rep/tc.h"
+#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
@@ -61,7 +67,7 @@
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
struct mlx5_nic_flow_attr {
u32 action;
@@ -89,6 +95,7 @@ enum {
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
+ MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -122,6 +129,11 @@ struct mlx5e_tc_flow {
u64 cookie;
unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
+
+ /* flows sharing the same reformat object - currently mpls decap */
+ struct list_head l3_to_l2_reformat;
+ struct mlx5e_decap_entry *decap_reformat;
+
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
* destinations.
@@ -134,6 +146,7 @@ struct mlx5e_tc_flow {
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ struct net_device *orig_dev; /* netdev adding flow first */
int tmp_efi_index;
struct list_head tmp_list; /* temporary flow list used by neigh update */
refcount_t refcnt;
@@ -153,35 +166,12 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct ethhdr eth;
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
-struct tunnel_match_key {
- struct flow_dissector_key_control enc_control;
- struct flow_dissector_key_keyid enc_key_id;
- struct flow_dissector_key_ports enc_tp;
- struct flow_dissector_key_ip enc_ip;
- union {
- struct flow_dissector_key_ipv4_addrs enc_ipv4;
- struct flow_dissector_key_ipv6_addrs enc_ipv6;
- };
-
- int filter_ifindex;
-};
-
-/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
- * Upper TUNNEL_INFO_BITS for general tunnel info.
- * Lower ENC_OPTS_BITS bits for enc_opts.
- */
-#define TUNNEL_INFO_BITS 6
-#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 2
-#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
-#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
-#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
-
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -220,8 +210,8 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
fmask = headers_c + soffset;
fval = headers_v + soffset;
- mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
- data = cpu_to_be32(data) >> (32 - (match_len * 8));
+ mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
+ data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
memcpy(fmask, &mask, match_len);
memcpy(fval, &data, match_len);
@@ -568,7 +558,7 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
- u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
void *tirc;
int err;
@@ -582,7 +572,7 @@ static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
- err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
if (err)
goto create_tir_err;
@@ -666,7 +656,7 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
- MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ &hp->indir_tirn[tt]);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
@@ -1092,7 +1082,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (IS_ERR(priv->fs.tc.t)) {
mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table\n");
+ "Failed to create tc offload table");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
return PTR_ERR(priv->fs.tc.t);
@@ -1144,6 +1134,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
+static int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+static void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -1319,6 +1314,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
+ err = mlx5e_attach_decap(priv, flow, extack);
+ if (err)
+ return err;
+ }
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
@@ -1428,6 +1429,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(attr->counter_dev, attr->counter);
+
+ if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ mlx5e_detach_decap(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1704,6 +1708,17 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
kfree_rcu(e, rcu);
}
+static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_decap_entry *d)
+{
+ WARN_ON(!list_empty(&d->flows));
+
+ if (!d->compl_result)
+ mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
+
+ kfree_rcu(d, rcu);
+}
+
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -1716,6 +1731,18 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
+ return;
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow, int out_index)
{
@@ -1739,6 +1766,29 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_entry *d = flow->decap_reformat;
+
+ if (!d)
+ return;
+
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ list_del(&flow->l3_to_l2_reformat);
+ flow->decap_reformat = NULL;
+
+ if (!refcount_dec_and_test(&d->refcnt)) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
@@ -1823,10 +1873,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
memchr_inv(opt->opt_data, 0, opt->length * 4)) {
*dont_care = false;
- if (opt->opt_class != U16_MAX ||
- opt->type != U8_MAX ||
- memchr_inv(opt->opt_data, 0xFF,
- opt->length * 4)) {
+ if (opt->opt_class != htons(U16_MAX) ||
+ opt->type != U8_MAX) {
NL_SET_ERR_MSG(extack,
"Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
@@ -1863,6 +1911,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
+ struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
@@ -1905,8 +1954,14 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
goto err_enc_opts;
if (!enc_opts_is_dont_care) {
+ memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
+ memcpy(&tun_enc_opts.key, enc_opts_match.key,
+ sizeof(*enc_opts_match.key));
+ memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
+ sizeof(*enc_opts_match.mask));
+
err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_match.key, &enc_opts_id);
+ &tun_enc_opts, &enc_opts_id);
if (err)
goto err_enc_opts;
}
@@ -1965,6 +2020,32 @@ u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
return flow->tunnel_id;
}
+void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
+ struct flow_match_basic *match, bool outer,
+ void *headers_c, void *headers_v)
+{
+ bool ip_version_cap;
+
+ ip_version_cap = outer ?
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version);
+
+ if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
+ (match->key->n_proto == htons(ETH_P_IP) ||
+ match->key->n_proto == htons(ETH_P_IPV6))) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
+ match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(match->mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(match->key->n_proto));
+ }
+}
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2005,7 +2086,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return err;
}
- flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ /* With mpls over udp we decapsulate using a packet reformat
+ * object.
+ */
+ if (!netif_is_bareudp(filter_dev))
+ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
@@ -2088,6 +2173,20 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
}
+static bool skip_key_basic(struct net_device *filter_dev,
+ struct flow_cls_offload *f)
+{
+ /* When doing mpls over udp decap, the user needs to provide
+ * MPLS_UC as the protocol in order to be able to match on mpls
+ * label fields. However, the actual ethertype is IP, so we want to
+ * avoid matching on it; otherwise we'll fail the match.
+ */
+ if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
+ return true;
+
+ return false;
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2132,7 +2231,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
@@ -2162,14 +2262,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (err)
return err;
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
+ !skip_key_basic(filter_dev, f)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(match.mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(match.key->n_proto));
+ mlx5e_tc_set_ethertype(priv->mdev, &match,
+ match_level == outer_match_level,
+ headers_c, headers_v);
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
@@ -2661,7 +2761,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2715,10 +2815,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
continue;
if (f->field_bsize == 32) {
- mask_be32 = (__be32)mask;
+ mask_be32 = (__force __be32)(mask);
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (f->field_bsize == 16) {
- mask_be32 = (__be32)mask;
+ mask_be32 = (__force __be32)(mask);
mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
@@ -2794,7 +2894,7 @@ int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
return 0;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
namespace);
@@ -2827,10 +2927,12 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
static const struct pedit_headers zero_masks = {};
-static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
+static int
+parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
int err = -EOPNOTSUPP;
@@ -2866,6 +2968,46 @@ out_err:
return err;
}
+static int
+parse_pedit_to_reformat(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask, val, offset;
+ u32 *p;
+
+ if (act->id != FLOW_ACTION_MANGLE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
+ return -EOPNOTSUPP;
+ }
+
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+ p = (u32 *)&parse_attr->eth;
+ *(p + (offset >> 2)) |= (val & mask);
+
+ return 0;
+}
+
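parse_pedit_to_reformat() above rebuilds the Ethernet header that the L3-to-L2 packet reformat will prepend: each FLOW_ACTION_MANGLE entry carries a keep-mask (zero bits mark the bytes being set), and the masked value is OR-ed into the header buffer at the 4-byte-aligned mangle offset. A standalone sketch of that accumulation (fold_mangle_into_eth is a hypothetical helper, not driver code):

#include <linux/if_ether.h>
#include <linux/types.h>

/* Illustrative only: fold one 32-bit mangle (offset/mask/val exactly
 * as tc pedit delivers them, where ~mask selects the bytes being set)
 * into a struct ethhdr being rebuilt for the reformat object.
 */
static void fold_mangle_into_eth(struct ethhdr *eth, u32 offset, u32 mask,
                                 u32 val)
{
        u32 *p = (u32 *)eth;

        /* offset is expected to be 4-byte aligned within the header */
        p[offset >> 2] |= val & ~mask;
}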
+static int parse_tc_pedit_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
+ return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+
+ return parse_pedit_to_modify_hdr(priv, act, namespace,
+ parse_attr, hdrs, extack);
+}
+
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
@@ -3003,16 +3145,19 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
+ void *headers_c;
void *headers_v;
u16 ethertype;
u8 ip_proto;
int i, err;
+ headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
- if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+ if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
+ ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
goto out_ok;
modify_ip_header = false;
@@ -3129,7 +3274,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -3195,7 +3340,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- hdrs, extack);
+ parse_attr, hdrs, NULL, extack);
if (err)
return err;
@@ -3289,12 +3434,22 @@ static inline int cmp_encap_info(struct encap_key *a,
a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}
+static inline int cmp_decap_info(struct mlx5e_decap_key *a,
+ struct mlx5e_decap_key *b)
+{
+ return memcmp(&a->key, &b->key, sizeof(b->key));
+}
+
static inline int hash_encap_info(struct encap_key *key)
{
return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
key->tc_tunnel->tunnel_type);
}
+static inline int hash_decap_info(struct mlx5e_decap_key *key)
+{
+ return jhash(&key->key, sizeof(key->key), 0);
+}
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
@@ -3314,6 +3469,11 @@ bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
return refcount_inc_not_zero(&e->refcnt);
}
+static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
uintptr_t hash_key)
@@ -3334,6 +3494,24 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
return NULL;
}
+static struct mlx5e_decap_entry *
+mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_key r_key;
+ struct mlx5e_decap_entry *e;
+
+ hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
+ hlist, hash_key) {
+ r_key = e->key;
+ if (!cmp_decap_info(&r_key, key) &&
+ mlx5e_decap_take(e))
+ return e;
+ }
+ return NULL;
+}
+
static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
{
size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
@@ -3479,6 +3657,84 @@ out_err_init:
return err;
}
+static int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_decap_entry *d;
+ struct mlx5e_decap_key key;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = attr->parse_attr;
+ if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "encap header larger than max supported");
+ return -EOPNOTSUPP;
+ }
+
+ key.key = parse_attr->eth;
+ hash_key = hash_decap_info(&key);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ d = mlx5e_decap_get(priv, &key, hash_key);
+ if (d) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ wait_for_completion(&d->res_ready);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ if (d->compl_result) {
+ err = -EREMOTEIO;
+ goto out_free;
+ }
+ goto found;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ d->key = key;
+ refcount_set(&d->refcnt, 1);
+ init_completion(&d->res_ready);
+ INIT_LIST_HEAD(&d->flows);
+ hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
+ sizeof(parse_attr->eth),
+ &parse_attr->eth,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(d->pkt_reformat)) {
+ err = PTR_ERR(d->pkt_reformat);
+ d->compl_result = err;
+ }
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ complete_all(&d->res_ready);
+ if (err)
+ goto out_free;
+
+found:
+ flow->decap_reformat = d;
+ attr->decap_pkt_reformat = d->pkt_reformat;
+ list_add(&flow->l3_to_l2_reformat, &d->flows);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return 0;
+
+out_free:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ mlx5e_decap_put(priv, d);
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return err;
+}
+
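mlx5e_attach_decap() above follows the lookup-or-create idiom also used for encap entries: the entry is published in the hash table before the firmware call is issued, so concurrent flows wait on the entry's completion instead of blocking the table mutex across the slow mlx5_packet_reformat_alloc() call. A driver-free rendering of the pattern, with error-path refcounting trimmed (tbl_lookup_get, tbl_alloc_insert and fw_alloc are hypothetical stand-ins):

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/types.h>

struct entry {
        struct completion res_ready;
        refcount_t refcnt;
        int compl_result;
        void *obj;
};

extern struct mutex tbl_lock;
struct entry *tbl_lookup_get(u32 key);   /* takes a reference on hit */
struct entry *tbl_alloc_insert(u32 key); /* ref = 1, completion inited */
int fw_alloc(void **obj);                /* slow firmware call */

static int get_or_create(u32 key, struct entry **res)
{
        struct entry *e;
        int err;

        mutex_lock(&tbl_lock);
        e = tbl_lookup_get(key);
        if (e) {
                /* Another flow is (or was) creating it: wait, then
                 * check whether the creator succeeded.
                 */
                mutex_unlock(&tbl_lock);
                wait_for_completion(&e->res_ready);
                err = e->compl_result;
                goto out;
        }

        e = tbl_alloc_insert(key);   /* published before the slow call */
        mutex_unlock(&tbl_lock);

        e->compl_result = fw_alloc(&e->obj);
        complete_all(&e->res_ready); /* wake waiters, success or not */
        err = e->compl_result;
out:
        *res = e;
        return err;
}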
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
@@ -3532,6 +3788,28 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
+static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
+ struct net_device *out_dev)
+{
+ struct net_device *fdb_out_dev = out_dev;
+ struct net_device *uplink_upper;
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ if (uplink_upper && netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev) {
+ fdb_out_dev = uplink_dev;
+ } else if (netif_is_lag_master(out_dev)) {
+ fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
+ if (fdb_out_dev &&
+ (!mlx5e_eswitch_rep(fdb_out_dev) ||
+ !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
+ fdb_out_dev = NULL;
+ }
+ rcu_read_unlock();
+ return fdb_out_dev;
+}
+
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
struct net_device **out_dev,
@@ -3713,7 +3991,8 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct net_device *filter_dev)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -3727,6 +4006,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
+ bool mpls_push = false;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
@@ -3741,15 +4021,48 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+ reformat_l2_to_l3_tunnel) ||
+ act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls push is supported only for mpls_uc protocol");
+ return -EOPNOTSUPP;
+ }
+ mpls_push = true;
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ /* we only support mpls pop if it is the first action
+ * and the filter net device is bareudp. Subsequent
+ * actions can be pedit and the last can be mirred
+ * egress redirect.
+ */
+ if (i) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls pop supported only as first action");
+ return -EOPNOTSUPP;
+ }
+ if (!netif_is_bareudp(filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls pop supported only on bareudp devices");
+ return -EOPNOTSUPP;
+ }
+
+ parse_attr->eth.h_proto = act->mpls_pop.proto;
+ action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_flag_set(flow, L3_TO_L2_DECAP);
+ break;
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- hdrs, extack);
+ parse_attr, hdrs, flow, extack);
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->split_count = attr->out_count;
+ if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ attr->split_count = attr->out_count;
+ }
break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
@@ -3771,6 +4084,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
+ if (mpls_push && !netif_is_bareudp(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mpls is supported only through a bareudp device");
+ return -EOPNOTSUPP;
+ }
+
if (ft_flow && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
@@ -3806,7 +4125,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- struct net_device *uplink_upper;
if (is_duplicated_output_device(priv->netdev,
out_dev,
@@ -3818,14 +4136,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
ifindexes[if_count] = out_dev->ifindex;
if_count++;
- rcu_read_lock();
- uplink_upper =
- netdev_master_upper_dev_get_rcu(uplink_dev);
- if (uplink_upper &&
- netif_is_lag_master(uplink_upper) &&
- uplink_upper == out_dev)
- out_dev = uplink_dev;
- rcu_read_unlock();
+ out_dev = get_fdb_out_dev(uplink_dev, out_dev);
+ if (!out_dev)
+ return -ENODEV;
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
@@ -4097,6 +4410,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->mod_hdr);
INIT_LIST_HEAD(&flow->hairpin);
+ INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
@@ -4166,7 +4480,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
if (err)
goto err_free;
@@ -4352,11 +4666,21 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
return err;
}
+static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
+ struct mlx5e_rep_priv *rpriv)
+{
+ /* An offloaded flow rule may be duplicated on a non-uplink representor
+ * that shares a tc block with the other slaves of a lag device.
+ */
+ return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+}
+
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_tc_flow *flow;
int err = 0;
@@ -4364,6 +4688,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
rcu_read_unlock();
if (flow) {
+ /* The same flow rule was already offloaded to a non-uplink
+ * representor sharing the tc block; just return 0.
+ */
+ if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
+ goto out;
+
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
netdev_warn_once(priv->netdev,
@@ -4378,6 +4708,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (err)
goto out;
+ /* The flow rule is offloaded to a non-uplink representor sharing the
+ * tc block; record the owner dev on the flow.
+ */
+ if (is_flow_rule_duplicate_allowed(dev, rpriv))
+ flow->orig_dev = dev;
+
err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
@@ -4729,7 +5065,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *priv;
struct mapping_ctx *mapping;
@@ -4818,146 +5154,35 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
mutex_unlock(&rpriv->unready_flows_lock);
}
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv,
- u32 tunnel_id)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct flow_dissector_key_enc_opts enc_opts = {};
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct metadata_dst *tun_dst;
- struct tunnel_match_key key;
- u32 tun_id, enc_opts_id;
- struct net_device *dev;
- int err;
-
- enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
- tun_id = tunnel_id >> ENC_OPTS_BITS;
-
- if (!tun_id)
- return true;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
- if (err) {
- WARN_ON_ONCE(true);
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel for tun_id: %d, err: %d\n",
- tun_id, err);
- return false;
- }
-
- if (enc_opts_id) {
- err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_id, &enc_opts);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
- enc_opts_id, err);
- return false;
- }
- }
-
- tun_dst = tun_rx_dst(enc_opts.len);
- if (!tun_dst) {
- WARN_ON_ONCE(true);
- return false;
- }
-
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
-
- if (enc_opts.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
- enc_opts.len, enc_opts.dst_opt_type);
-
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- dev = dev_get_by_index(&init_net, key.filter_ifindex);
- if (!dev) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel device with ifindex: %d\n",
- key.filter_ifindex);
- return false;
+static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
+ struct flow_cls_offload *cls_flower,
+ unsigned long flags)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_DESTROY:
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
+ case FLOW_CLS_STATS:
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
+ default:
+ return -EOPNOTSUPP;
}
-
- /* Set tun_dev so we do dev_put() after datapath */
- tc_priv->tun_dev = dev;
-
- skb->dev = dev;
-
- return true;
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv)
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5_eswitch *esw;
- struct mlx5e_priv *priv;
- int tunnel_moffset;
- int err;
-
- reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
- if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- reg_c0 = 0;
- reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
-
- if (!reg_c0)
- return true;
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
+ struct mlx5e_priv *priv = cb_priv;
- priv = netdev_priv(skb->dev);
- esw = priv->mdev->priv.eswitch;
-
- err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
- reg_c0, err);
- return false;
- }
-
- if (chain) {
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
-
- tc_skb_ext->chain = chain;
-
- tuple_id = reg_c1 & TUPLE_ID_MAX;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
- return false;
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
+ default:
+ return -EOPNOTSUPP;
}
-
- tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
- tunnel_id = reg_c1 >> (8 * tunnel_moffset);
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-#endif /* CONFIG_NET_TC_SKB_EXT */
-
- return true;
-}
-
-void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->tun_dev)
- dev_put(tc_priv->tun_dev);
}
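With the skb-restore code relocated elsewhere in this series and mlx5e_setup_tc_block_cb() now owned by en_tc.c, the netdev side only needs to dispatch TC_SETUP_BLOCK to it. A plausible caller following the standard flow_block_cb_setup_simple() pattern (a sketch; the list name and function placement are assumptions, not shown in this hunk):

static LIST_HEAD(mlx5e_block_cb_list);

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
                          void *type_data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                /* Bind/unbind the classifier block; ingress only. */
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_block_cb_list,
                                                  mlx5e_setup_tc_block_cb,
                                                  priv, priv, true);
        default:
                return -EOPNOTSUPP;
        }
}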
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index abdcfa4c4e0e..5c330b0cae21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -34,11 +34,41 @@
#define __MLX5_EN_TC_H__
#include <net/pkt_cls.h>
+#include "en.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
+struct tunnel_match_key {
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_keyid enc_key_id;
+ struct flow_dissector_key_ports enc_tp;
+ struct flow_dissector_key_ip enc_ip;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+
+ int filter_ifindex;
+};
+
+struct tunnel_match_enc_opts {
+ struct flow_dissector_key_enc_opts key;
+ struct flow_dissector_key_enc_opts mask;
+};
+
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
+ * Lower ENC_OPTS_BITS bits for enc_opts.
+ */
+#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
+
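For reference, the pack/unpack implied by these masks (mirroring the shifts used by the restore path that this series relocates; the helper names are illustrative):

/* Upper TUNNEL_INFO_BITS carry the tunnel-info mapping id, lower
 * ENC_OPTS_BITS the enc_opts mapping id.
 */
static inline u32 tunnel_id_pack(u32 tun_id, u32 enc_opts_id)
{
        return (tun_id << ENC_OPTS_BITS) |
               (enc_opts_id & ENC_OPTS_BITS_MASK);
}

static inline void tunnel_id_unpack(u32 tunnel_id, u32 *tun_id,
                                    u32 *enc_opts_id)
{
        *enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
        *tun_id = tunnel_id >> ENC_OPTS_BITS;
}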
enum {
MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLAG_EGRESS_BIT,
@@ -50,9 +80,6 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
-void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
-
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
@@ -119,11 +146,6 @@ struct mlx5e_tc_update_priv {
struct net_device *tun_dev;
};
-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv);
-
-void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
-
struct mlx5e_tc_mod_hdr_acts {
int num_actions;
int max_actions;
@@ -148,6 +170,26 @@ void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
struct mlx5e_tc_flow;
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
+ struct flow_match_basic *match, bool outer,
+ void *headers_c, void *headers_v);
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
+void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -156,6 +198,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
{
return 0;
}
+
+static inline int
+mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{ return -EOPNOTSUPP; }
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 119a5c6cc167..6d406063aca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -265,8 +265,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -324,7 +324,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
@@ -372,47 +373,38 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
- wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
- /* might send skbs and update wqe and pi */
- skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
- if (unlikely(!skb))
- return NETDEV_TX_OK;
+ /* May send SKBs and WQEs. */
+ if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
+ goto out;
- return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
-}
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
- struct mlx5_err_cqe *err_cqe)
-{
- struct mlx5_cqwq *wq = &sq->cq.wq;
- u32 ci;
+ /* May update the WQE, but may not post other WQEs. */
+ if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
+ goto out;
- ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+ mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
- netdev_err(sq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- sq->cq.mcq.cqn, ci, sq->sqn,
- get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
- err_cqe->syndrome, err_cqe->vendor_err_synd);
- mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
+out:
+ return NETDEV_TX_OK;
}
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
@@ -501,7 +493,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
- mlx5e_dump_error_cqe(sq,
+ mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
@@ -585,11 +577,9 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more)
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -599,9 +589,9 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
+ u16 headlen, ihs, pi;
u32 num_bytes;
int num_dma;
__be16 mss;
@@ -637,14 +627,8 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- }
-
- mlx5i_sq_fetch_wqe(sq, &wqe, pi);
+ pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
@@ -672,12 +656,10 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
num_dma, wi, cseg, xmit_more);
- return NETDEV_TX_OK;
+ return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
}
#endif
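Both xmit paths above now compute the producer index explicitly with mlx5_wq_cyc_ctr2ix() before fetching the WQE. A minimal model of that cyclic work-queue index math (illustrative names; assumes, as these rings do, a power-of-two size):

/* sq->pc counts produced WQEBBs monotonically; the ring slot is the
 * counter masked by (size - 1).
 */
static inline u16 wq_cyc_ctr2ix(u16 ctr, u16 ring_size)
{
        return ctr & (ring_size - 1); /* ring_size must be a power of 2 */
}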
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index acb20215a33b..8480278f2ee2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,11 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
struct mlx5e_tx_wqe *nopwqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- sq->db.ico_wqe[pi].num_wqebbs = 1;
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_NOP,
+ .num_wqebbs = 1,
+ };
+
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ce6c621af043..31ef9f8420c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -36,7 +36,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
-#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -102,12 +101,11 @@ struct mlx5_eq_table {
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
- u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_eq, in);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
new file mode 100644
index 000000000000..d46f8b225ebe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "lgcy.h"
+
+static void esw_acl_egress_lgcy_rules_destroy(struct mlx5_vport *vport)
+{
+ esw_acl_egress_vlan_destroy(vport);
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+ mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+ vport->egress.legacy.drop_rule = NULL;
+ }
+}
+
+static int esw_acl_egress_lgcy_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_group *drop_grp;
+ u32 *flow_group_in;
+ int err = 0;
+
+ err = esw_acl_egress_vlan_grp_create(esw, vport);
+ if (err)
+ return err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto drop_grp_err;
+ }
+
+ vport->egress.legacy.drop_grp = drop_grp;
+ kvfree(flow_group_in);
+ return 0;
+
+drop_grp_err:
+ kvfree(flow_group_in);
+alloc_err:
+ esw_acl_egress_vlan_grp_destroy(vport);
+ return err;
+}
+
+static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_grp)) {
+ mlx5_destroy_flow_group(vport->egress.legacy.drop_grp);
+ vport->egress.legacy.drop_grp = NULL;
+ }
+ esw_acl_egress_vlan_grp_destroy(vport);
+}
+
+int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_destination drop_ctr_dst = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_fc *drop_counter = NULL;
+ struct mlx5_flow_act flow_act = {};
+ /* The egress acl table contains 2 rules:
+ * 1) Allow traffic with vlan_tag == vst_vlan_id.
+ * 2) Drop all other traffic.
+ */
+ int table_size = 2;
+ int dest_num = 0;
+ int err = 0;
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ drop_counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(drop_counter))
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule counter err(%ld)\n",
+ vport->vport, PTR_ERR(drop_counter));
+ vport->egress.legacy.drop_counter = drop_counter;
+ }
+
+ esw_acl_egress_lgcy_rules_destroy(vport);
+
+ if (!vport->info.vlan && !vport->info.qos) {
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ return 0;
+ }
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
+
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->info.vlan, vport->info.qos);
+
+ /* Allowed vlan rule */
+ err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ if (err)
+ goto out;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (!IS_ERR_OR_NULL(drop_counter)) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
+ vport->egress.legacy.drop_rule =
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
+ &flow_act, dst, dest_num);
+ if (IS_ERR(vport->egress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->egress.legacy.drop_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.legacy.drop_rule = NULL;
+ goto out;
+ }
+
+ return err;
+
+out:
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ return err;
+}
+
+void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ goto clean_drop_counter;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_acl_egress_lgcy_rules_destroy(vport);
+ esw_acl_egress_lgcy_groups_destroy(vport);
+ esw_acl_egress_table_destroy(vport);
+
+clean_drop_counter:
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+ vport->egress.legacy.drop_counter = NULL;
+ }
+}
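esw_acl_egress_lgcy_setup() is written to be idempotent: it tears down any existing rules, cleans up entirely when neither vlan nor qos is set, and only then (re)creates the table, groups and rules. A hedged sketch of how a VST configuration path might drive it (set_vport_vst is hypothetical; the real caller sits outside this patch):

/* Hypothetical VST path: record the vlan/qos on the vport and
 * (re)build its legacy egress ACL accordingly.
 */
static int set_vport_vst(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                         u16 vlan, u8 qos)
{
        vport->info.vlan = vlan;
        vport->info.qos = qos;
        return esw_acl_egress_lgcy_setup(esw, vport);
}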
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
new file mode 100644
index 000000000000..07b2acd7e6b3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "ofld.h"
+
+static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->egress.offloads.fwd_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
+ vport->egress.offloads.fwd_rule = NULL;
+}
+
+static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ int err = 0;
+
+ esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
+ vport->vport, fwd_dest->vport.num);
+
+ /* Delete the old egress forward-to-vport rule if any */
+ esw_acl_egress_ofld_fwd2vport_destroy(vport);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ vport->egress.offloads.fwd_rule =
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
+ &flow_act, fwd_dest, 1);
+ if (IS_ERR(vport->egress.offloads.fwd_rule)) {
+ err = PTR_ERR(vport->egress.offloads.fwd_rule);
+ esw_warn(esw->dev,
+ "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
+ vport->vport, err);
+ vport->egress.offloads.fwd_rule = NULL;
+ }
+
+ return err;
+}
+
+static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest)
+{
+ int err = 0;
+ int action;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, then allow.
+ * Unmatched traffic is allowed by default.
+ */
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ /* prio tag vlan rule - pop it so vport receives untagged packets */
+ err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
+ if (err)
+ goto prio_err;
+ }
+
+ if (fwd_dest) {
+ err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
+ if (err)
+ goto fwd_err;
+ }
+
+ return 0;
+
+fwd_err:
+ esw_acl_egress_vlan_destroy(vport);
+prio_err:
+ return err;
+}
+
+static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
+{
+ esw_acl_egress_vlan_destroy(vport);
+ esw_acl_egress_ofld_fwd2vport_destroy(vport);
+}
+
+static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fwd_grp;
+ u32 *flow_group_in;
+ u32 flow_index = 0;
+ int ret = 0;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ ret = esw_acl_egress_vlan_grp_create(esw, vport);
+ if (ret)
+ return ret;
+
+ flow_index++;
+ }
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
+ goto out;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ ret = -ENOMEM;
+ goto fwd_grp_err;
+ }
+
+ /* This group holds 1 FTE, used to forward all packets to the other
+ * vport when vport bonding is supported.
+ */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+ fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(fwd_grp)) {
+ ret = PTR_ERR(fwd_grp);
+ esw_warn(esw->dev,
+ "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
+ vport->vport, ret);
+ kvfree(flow_group_in);
+ goto fwd_grp_err;
+ }
+ vport->egress.offloads.fwd_grp = fwd_grp;
+ kvfree(flow_group_in);
+ return 0;
+
+fwd_grp_err:
+ esw_acl_egress_vlan_grp_destroy(vport);
+out:
+ return ret;
+}
+
+static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
+ mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
+ vport->egress.offloads.fwd_grp = NULL;
+ }
+ esw_acl_egress_vlan_grp_destroy(vport);
+}
+
+int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int table_size = 0;
+ int err;
+
+ if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
+ !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ return 0;
+
+ esw_acl_egress_ofld_rules_destroy(vport);
+
+ if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
+ table_size++;
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ table_size++;
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
+ if (IS_ERR_OR_NULL(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_egress_ofld_groups_create(esw, vport);
+ if (err)
+ goto group_err;
+
+ esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);
+
+ err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
+ if (err)
+ goto rules_err;
+
+ return 0;
+
+rules_err:
+ esw_acl_egress_ofld_groups_destroy(vport);
+group_err:
+ esw_acl_egress_table_destroy(vport);
+ return err;
+}
+
+void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
+{
+ esw_acl_egress_ofld_rules_destroy(vport);
+ esw_acl_egress_ofld_groups_destroy(vport);
+ esw_acl_egress_table_destroy(vport);
+}
+
+int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
+ u16 passive_vport_num)
+{
+ struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
+ struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
+ struct mlx5_flow_destination fwd_dest = {};
+
+ if (IS_ERR(active_vport))
+ return PTR_ERR(active_vport);
+ if (IS_ERR(passive_vport))
+ return PTR_ERR(passive_vport);
+
+ /* Cleanup and recreate the active vport's rules WITHOUT fwd2vport */
+ esw_acl_egress_ofld_rules_destroy(active_vport);
+ esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);
+
+ /* Cleanup and recreate the passive vport's rules, adding a fwd2vport
+ * rule that forwards everything to the active vport.
+ */
+ esw_acl_egress_ofld_rules_destroy(passive_vport);
+ fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ fwd_dest.vport.num = active_vport_num;
+ fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
+}
+
+int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ esw_acl_egress_ofld_rules_destroy(vport);
+ return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
new file mode 100644
index 000000000000..22f4c1c28006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+
+struct mlx5_flow_table *
+esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ int acl_supported;
+ int vport_index;
+ int err;
+
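+ /* Ingress and egress ACL support are advertised by separate FW caps */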
+ acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
+ MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) :
+ MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support);
+
+ if (!acl_supported)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
+ ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
+
+ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
+ vport_num);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
+ ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress", err);
+ }
+ return acl;
+}
+
+int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest,
+ u16 vlan_id, u32 flow_action)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ if (vport->egress.allowed_vlan)
+ return -EEXIST;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
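+ /* Match packets whose outermost C-VLAN tag carries the allowed VID */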
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = flow_action;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, fwd_dest, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ }
+
+ kvfree(spec);
+ return err;
+}
+
+void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ mlx5_del_flow_rules(vport->egress.allowed_vlan);
+ vport->egress.allowed_vlan = NULL;
+ }
+}
+
+int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int ret = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
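+ /* The VLAN group matches on the C-VLAN tag and VID and always occupies
+ * flow index 0 of the egress ACL.
+ */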
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
+ if (IS_ERR(vlan_grp)) {
+ ret = PTR_ERR(vlan_grp);
+ esw_warn(esw->dev,
+ "Failed to create E-Switch vport[%d] egress pop vlans flow group, err(%d)\n",
+ vport->vport, ret);
+ goto out;
+ }
+ vport->egress.vlan_grp = vlan_grp;
+
+out:
+ kvfree(flow_group_in);
+ return ret;
+}
+
+void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.vlan_grp)) {
+ mlx5_destroy_flow_group(vport->egress.vlan_grp);
+ vport->egress.vlan_grp = NULL;
+ }
+}
+
+void esw_acl_egress_table_destroy(struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.acl = NULL;
+}
+
+void esw_acl_ingress_table_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.acl)
+ return;
+
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+}
+
+void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport)
+{
+ if (!vport->ingress.allow_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.allow_rule);
+ vport->ingress.allow_rule = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
new file mode 100644
index 000000000000..8dc4cab66a71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_HELPER_H__
+#define __MLX5_ESWITCH_ACL_HELPER_H__
+
+#include "eswitch.h"
+
+/* General acl helper functions */
+struct mlx5_flow_table *
+esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
+
+/* Egress acl helper functions */
+void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
+int esw_egress_acl_vlan_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_flow_destination *fwd_dest,
+ u16 vlan_id, u32 flow_action);
+void esw_acl_egress_vlan_destroy(struct mlx5_vport *vport);
+int esw_acl_egress_vlan_grp_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_vlan_grp_destroy(struct mlx5_vport *vport);
+
+/* Ingress acl helper functions */
+void esw_acl_ingress_table_destroy(struct mlx5_vport *vport);
+void esw_acl_ingress_allow_rule_destroy(struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESWITCH_ACL_HELPER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
new file mode 100644
index 000000000000..9bda4fe2eafa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "lgcy.h"
+
+static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.legacy.drop_rule) {
+ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+ vport->ingress.legacy.drop_rule = NULL;
+ }
+ esw_acl_ingress_allow_rule_destroy(vport);
+}
+
+static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
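+ /* Group 0: untagged traffic with smac == the vport's MAC */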
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto spoof_err;
+ }
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
+
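+ /* Group 1: untagged traffic, any source MAC */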
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto untagged_err;
+ }
+ vport->ingress.legacy.allow_untagged_only_grp = g;
+
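+ /* Group 2: traffic with smac == the vport's MAC, tagged or untagged */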
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto allow_spoof_err;
+ }
+ vport->ingress.legacy.allow_spoofchk_only_grp = g;
+
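+ /* Group 3: no match criteria, holds the catch-all drop rule */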
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto drop_err;
+ }
+ vport->ingress.legacy.drop_grp = g;
+ kvfree(flow_group_in);
+ return 0;
+
+drop_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+allow_spoof_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+untagged_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+spoof_err:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+ vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_only_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+ vport->ingress.legacy.allow_untagged_only_grp = NULL;
+ }
+ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+ }
+ if (vport->ingress.legacy.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+ vport->ingress.legacy.drop_grp = NULL;
+ }
+}
+
+int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_destination drop_ctr_dst = {};
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec = NULL;
+ struct mlx5_fc *counter = NULL;
+ /* The ingress acl table contains 4 groups
+ * (2 rules are active at the same time:
+ * 1 allow rule from one of the first 3 groups and
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+ int dest_num = 0;
+ int err = 0;
+ u8 *smac_v;
+
+ esw_acl_ingress_lgcy_rules_destroy(vport);
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ counter = mlx5_fc_create(esw->dev, false);
+ if (IS_ERR(counter)) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ counter = NULL;
+ }
+ vport->ingress.legacy.drop_counter = counter;
+ }
+
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ return 0;
+ }
+
+ if (!vport->ingress.acl) {
+ vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ table_size);
+ if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ err = PTR_ERR(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_ingress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->info.vlan, vport->info.qos);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (vport->info.vlan || vport->info.qos)
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+
+ if (vport->info.spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, vport->info.mac);
+ }
+
+ /* Create ingress allow rule */
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ /* Attach drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
+ vport->ingress.legacy.drop_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, NULL,
+ &flow_act, dst, dest_num);
+ if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+ err = PTR_ERR(vport->ingress.legacy.drop_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.legacy.drop_rule = NULL;
+ goto out;
+ }
+ kvfree(spec);
+ return 0;
+
+out:
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ goto clean_drop_counter;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_acl_ingress_lgcy_rules_destroy(vport);
+ esw_acl_ingress_lgcy_groups_destroy(vport);
+ esw_acl_ingress_table_destroy(vport);
+
+clean_drop_counter:
+ if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_counter)) {
+ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+ vport->ingress.legacy.drop_counter = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
new file mode 100644
index 000000000000..4e55d7225a26
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "helper.h"
+#include "ofld.h"
+
+static bool
+esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
+static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only one FTE:
+ * 1) Untagged packets - push prio tag VLAN and modify metadata if
+ * required, allow
+ * Unmatched traffic is allowed by default
+ */
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Untagged packets - push prio tag VLAN, allow */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
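+ /* A prio tag is an 802.1Q header with VID 0; push one onto untagged packets */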
+ flow_act.vlan[0].ethtype = ETH_P_8021Q;
+ flow_act.vlan[0].vid = 0;
+ flow_act.vlan[0].prio = 0;
+
+ if (vport->ingress.offloads.modify_metadata_rule) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ }
+
+ vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress untagged allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ }
+
+ kvfree(spec);
+ return err;
+}
+
+static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_flow_act flow_act = {};
+ int err = 0;
+ u32 key;
+
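+ /* Write the vport's metadata into the source-port bits of REG_C_0 so
+ * that downstream flow tables can match on the originating vport.
+ */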
+ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
+ key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data, key);
+ MLX5_SET(set_action_in, action, offset,
+ ESW_SOURCE_PORT_METADATA_OFFSET);
+ MLX5_SET(set_action_in, action, length,
+ ESW_SOURCE_PORT_METADATA_BITS);
+
+ vport->ingress.offloads.modify_metadata =
+ mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ 1, action);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata);
+ esw_warn(esw->dev,
+ "failed to alloc modify header for vport %d ingress acl (%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ vport->ingress.offloads.modify_metadata_rule =
+ mlx5_add_flow_rules(vport->ingress.acl,
+ NULL, &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
+ esw_warn(esw->dev,
+ "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
+ vport->vport, err);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
+ vport->ingress.offloads.modify_metadata_rule = NULL;
+ }
+ return err;
+}
+
+static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.offloads.modify_metadata_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
+ vport->ingress.offloads.modify_metadata_rule = NULL;
+}
+
+static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ err = esw_acl_ingress_mod_metadata_create(esw, vport);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport(%d) create ingress modify metadata, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+ }
+
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
+ err = esw_acl_ingress_prio_tag_create(esw, vport);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport(%d) create ingress prio tag rule, err(%d)\n",
+ vport->vport, err);
+ goto prio_tag_err;
+ }
+ }
+
+ return 0;
+
+prio_tag_err:
+ esw_acl_ingress_mod_metadata_destroy(esw, vport);
+ return err;
+}
+
+static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_acl_ingress_allow_rule_destroy(vport);
+ esw_acl_ingress_mod_metadata_destroy(esw, vport);
+}
+
+static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ u32 flow_index = 0;
+ int ret = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
+ /* This group holds the FTE that matches untagged packets when
+ * prio_tag is enabled.
+ */
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in, match_criteria);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+ vport->vport, ret);
+ goto prio_tag_err;
+ }
+ vport->ingress.offloads.metadata_prio_tag_grp = g;
+ flow_index++;
+ }
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ /* This group holds an FTE with no match criteria, which adds metadata
+ * to tagged packets when prio-tag is enabled, or to all traffic when
+ * prio-tag is disabled.
+ */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, ret);
+ goto metadata_err;
+ }
+ vport->ingress.offloads.metadata_allmatch_grp = g;
+ }
+
+ kvfree(flow_group_in);
+ return 0;
+
+metadata_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+ vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+ }
+prio_tag_err:
+ kvfree(flow_group_in);
+ return ret;
+}
+
+static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+{
+ if (vport->ingress.offloads.metadata_allmatch_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+ vport->ingress.offloads.metadata_allmatch_grp = NULL;
+ }
+
+ if (vport->ingress.offloads.metadata_prio_tag_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+ vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+ }
+}
+
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int num_ftes = 0;
+ int err;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !esw_acl_ingress_prio_tag_enabled(esw, vport))
+ return 0;
+
+ esw_acl_ingress_allow_rule_destroy(vport);
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+ num_ftes++;
+ if (esw_acl_ingress_prio_tag_enabled(esw, vport))
+ num_ftes++;
+
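+ /* Size the ACL with one FTE per enabled feature */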
+ vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ num_ftes);
+ if (IS_ERR_OR_NULL(vport->ingress.acl)) {
+ err = PTR_ERR(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ return err;
+ }
+
+ err = esw_acl_ingress_ofld_groups_create(esw, vport);
+ if (err)
+ goto group_err;
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules\n", vport->vport);
+
+ err = esw_acl_ingress_ofld_rules_create(esw, vport);
+ if (err)
+ goto rules_err;
+
+ return 0;
+
+rules_err:
+ esw_acl_ingress_ofld_groups_destroy(vport);
+group_err:
+ esw_acl_ingress_table_destroy(vport);
+ return err;
+}
+
+void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ esw_acl_ingress_ofld_rules_destroy(esw, vport);
+ esw_acl_ingress_ofld_groups_destroy(vport);
+ esw_acl_ingress_table_destroy(vport);
+}
+
+/* Caller must hold rtnl_lock */
+int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ int err;
+
+ if (WARN_ON_ONCE(IS_ERR(vport))) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return PTR_ERR(vport);
+ }
+
+ esw_acl_ingress_ofld_rules_destroy(esw, vport);
+
+ vport->metadata = metadata ? metadata : vport->default_metadata;
+
+ /* Recreate ingress acl rules with vport->metadata */
+ err = esw_acl_ingress_ofld_rules_create(esw, vport);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ vport->metadata = vport->default_metadata;
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
new file mode 100644
index 000000000000..44c152da3d83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/lgcy.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_LGCY_H__
+#define __MLX5_ESWITCH_ACL_LGCY_H__
+
+#include "eswitch.h"
+
+/* Eswitch acl egress external APIs */
+int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+/* Eswitch acl ingress external APIs */
+int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESWITCH_ACL_LGCY_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
new file mode 100644
index 000000000000..c57869b93d60
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
+
+#ifndef __MLX5_ESWITCH_ACL_OFLD_H__
+#define __MLX5_ESWITCH_ACL_OFLD_H__
+
+#include "eswitch.h"
+
+/* Eswitch acl egress external APIs */
+int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
+int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
+ u16 passive_vport_num);
+int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
+
+static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *esw)
+{
+ return esw && esw->mode == MLX5_ESWITCH_OFFLOADS &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_ESW_FLOWTABLE(esw->dev, egress_acl_forward_to_vport);
+}
+
+/* Eswitch acl ingress external APIs */
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata);
+
+#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
index 029001040737..d5bf908dfecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
@@ -274,7 +274,7 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
static int
create_fdb_chain_restore(struct fdb_chain *fdb_chain)
{
- char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
index f8c4239846ea..7679ac359e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
@@ -6,6 +6,8 @@
#include "eswitch.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
bool
@@ -46,4 +48,21 @@ void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
int
mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
+#else /* CONFIG_MLX5_CLS_ACT */
+
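+/* Stubs for builds where TC flow-chains support is compiled out */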
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
+ u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
+static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7f618a443bfd..1116ab9bea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include "esw/acl/lgcy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
@@ -84,8 +85,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
- int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -108,40 +108,24 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen)
+ bool other_vport, void *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
-
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
-
- MLX5_SET(query_esw_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
- MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
u16 vlan, u8 qos, u8 set_flags)
{
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
@@ -170,8 +154,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in,
field_select.vport_cvlan_insert, 1);
- return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
- in, sizeof(in));
+ return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
/* E-Switch FDB */
@@ -954,512 +937,6 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_group *vlan_grp = NULL;
- struct mlx5_flow_group *drop_grp = NULL;
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
- void *match_criteria;
- u32 *flow_group_in;
- /* The egress acl table contains 2 rules:
- * 1)Allow traffic with vlan_tag=vst_vlan_id
- * 2)Drop all other traffic.
- */
- int table_size = 2;
- int err = 0;
-
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
-
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- mlx5_eswitch_vport_num_to_index(esw, vport->vport));
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
- return -EOPNOTSUPP;
- }
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
- vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR(vlan_grp)) {
- err = PTR_ERR(vlan_grp);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- drop_grp = mlx5_create_flow_group(acl, flow_group_in);
- if (IS_ERR(drop_grp)) {
- err = PTR_ERR(drop_grp);
- esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
- vport->vport, err);
- goto out;
- }
-
- vport->egress.acl = acl;
- vport->egress.drop_grp = drop_grp;
- vport->egress.allowed_vlans_grp = vlan_grp;
-out:
- kvfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(vlan_grp))
- mlx5_destroy_flow_group(vlan_grp);
- if (err && !IS_ERR_OR_NULL(acl))
- mlx5_destroy_flow_table(acl);
- return err;
-}
-
-void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
- mlx5_del_flow_rules(vport->egress.allowed_vlan);
- vport->egress.allowed_vlan = NULL;
- }
-
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
- mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
- vport->egress.legacy.drop_rule = NULL;
- }
-}
-
-void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (IS_ERR_OR_NULL(vport->egress.acl))
- return;
-
- esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
-
- esw_vport_cleanup_egress_rules(esw, vport);
- mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
- mlx5_destroy_flow_group(vport->egress.drop_grp);
- mlx5_destroy_flow_table(vport->egress.acl);
- vport->egress.allowed_vlans_grp = NULL;
- vport->egress.drop_grp = NULL;
- vport->egress.acl = NULL;
-}
-
-static int
-esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_group *g;
- void *match_criteria;
- u32 *flow_group_in;
- int err;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
- vport->vport, err);
- goto spoof_err;
- }
- vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
- vport->vport, err);
- goto untagged_err;
- }
- vport->ingress.legacy.allow_untagged_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
- vport->vport, err);
- goto allow_spoof_err;
- }
- vport->ingress.legacy.allow_spoofchk_only_grp = g;
-
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
- vport->vport, err);
- goto drop_err;
- }
- vport->ingress.legacy.drop_grp = g;
- kvfree(flow_group_in);
- return 0;
-
-drop_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
-allow_spoof_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
-untagged_err:
- if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
-spoof_err:
- kvfree(flow_group_in);
- return err;
-}
-
-int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport, int table_size)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
- struct mlx5_flow_table *acl;
- int vport_index;
- int err;
-
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- return -EOPNOTSUPP;
-
- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
- vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport_index);
- if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
- vport->vport);
- return -EOPNOTSUPP;
- }
-
- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
- if (IS_ERR(acl)) {
- err = PTR_ERR(acl);
- esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
- vport->vport, err);
- return err;
- }
- vport->ingress.acl = acl;
- return 0;
-}
-
-void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
-{
- if (!vport->ingress.acl)
- return;
-
- mlx5_destroy_flow_table(vport->ingress.acl);
- vport->ingress.acl = NULL;
-}
-
-void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (vport->ingress.legacy.drop_rule) {
- mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
- vport->ingress.legacy.drop_rule = NULL;
- }
-
- if (vport->ingress.allow_rule) {
- mlx5_del_flow_rules(vport->ingress.allow_rule);
- vport->ingress.allow_rule = NULL;
- }
-}
-
-static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (!vport->ingress.acl)
- return;
-
- esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
-
- esw_vport_cleanup_ingress_rules(esw, vport);
- if (vport->ingress.legacy.allow_spoofchk_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
- vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_only_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
- vport->ingress.legacy.allow_untagged_only_grp = NULL;
- }
- if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
- vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
- }
- if (vport->ingress.legacy.drop_grp) {
- mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
- vport->ingress.legacy.drop_grp = NULL;
- }
- esw_vport_destroy_ingress_acl_table(vport);
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
- struct mlx5_flow_destination drop_ctr_dst = {0};
- struct mlx5_flow_destination *dst = NULL;
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec = NULL;
- int dest_num = 0;
- int err = 0;
- u8 *smac_v;
-
- /* The ingress acl table contains 4 groups
- * (2 active rules at the same time -
- * 1 allow rule from one of the first 3 groups.
- * 1 drop rule from the last group):
- * 1)Allow untagged traffic with smac=original mac.
- * 2)Allow untagged traffic.
- * 3)Allow traffic with smac=original mac.
- * 4)Drop all other traffic.
- */
- int table_size = 4;
-
- esw_vport_cleanup_ingress_rules(esw, vport);
-
- if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- return 0;
- }
-
- if (!vport->ingress.acl) {
- err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
- if (err) {
- esw_warn(esw->dev,
- "vport[%d] enable ingress acl err (%d)\n",
- err, vport->vport);
- return err;
- }
-
- err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
- if (err)
- goto out;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->info.vlan, vport->info.qos);
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
- if (vport->info.vlan || vport->info.qos)
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-
- if (vport->info.spoofchk) {
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
- smac_v = MLX5_ADDR_OF(fte_match_param,
- spec->match_value,
- outer_headers.smac_47_16);
- ether_addr_copy(smac_v, vport->info.mac);
- }
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- vport->ingress.allow_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress allow rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.allow_rule = NULL;
- goto out;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /* Attach drop flow counter */
- if (counter) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
- dst = &drop_ctr_dst;
- dest_num++;
- }
- vport->ingress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->ingress.acl, NULL,
- &flow_act, dst, dest_num);
- if (IS_ERR(vport->ingress.legacy.drop_rule)) {
- err = PTR_ERR(vport->ingress.legacy.drop_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress drop rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.legacy.drop_rule = NULL;
- goto out;
- }
- kvfree(spec);
- return 0;
-
-out:
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- kvfree(spec);
- return err;
-}
-
-int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u16 vlan_id, u32 flow_action)
-{
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- if (vport->egress.allowed_vlan)
- return -EEXIST;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = flow_action;
- vport->egress.allowed_vlan =
- mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->egress.allowed_vlan)) {
- err = PTR_ERR(vport->egress.allowed_vlan);
- esw_warn(esw->dev,
- "vport[%d] configure egress vlan rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.allowed_vlan = NULL;
- }
-
- kvfree(spec);
- return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
- struct mlx5_flow_destination drop_ctr_dst = {0};
- struct mlx5_flow_destination *dst = NULL;
- struct mlx5_flow_act flow_act = {0};
- int dest_num = 0;
- int err = 0;
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- if (!vport->info.vlan && !vport->info.qos) {
- esw_vport_disable_egress_acl(esw, vport);
- return 0;
- }
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err) {
- mlx5_core_warn(esw->dev,
- "failed to enable egress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- esw_debug(esw->dev,
- "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->info.vlan, vport->info.qos);
-
- /* Allowed vlan rule */
- err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
- if (err)
- return err;
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-
- /* Attach egress drop flow counter */
- if (counter) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
- dst = &drop_ctr_dst;
- dest_num++;
- }
- vport->egress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->egress.acl, NULL,
- &flow_act, dst, dest_num);
- if (IS_ERR(vport->egress.legacy.drop_rule)) {
- err = PTR_ERR(vport->egress.legacy.drop_rule);
- esw_warn(esw->dev,
- "vport[%d] configure egress drop rule failed, err(%d)\n",
- vport->vport, err);
- vport->egress.legacy.drop_rule = NULL;
- }
-
- return err;
-}
-
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
const struct mlx5_core_dev *dev = esw->dev;
@@ -1671,44 +1148,19 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
- if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
- vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(vport->ingress.legacy.drop_counter)) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress drop rule counter failed\n",
- vport->vport);
- vport->ingress.legacy.drop_counter = NULL;
- }
- }
-
- ret = esw_vport_ingress_config(esw, vport);
+ ret = esw_acl_ingress_lgcy_setup(esw, vport);
if (ret)
goto ingress_err;
- if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
- vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(vport->egress.legacy.drop_counter)) {
- esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter failed\n",
- vport->vport);
- vport->egress.legacy.drop_counter = NULL;
- }
- }
-
- ret = esw_vport_egress_config(esw, vport);
+ ret = esw_acl_egress_lgcy_setup(esw, vport);
if (ret)
goto egress_err;
return 0;
egress_err:
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
- vport->egress.legacy.drop_counter = NULL;
-
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
- mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
- vport->ingress.legacy.drop_counter = NULL;
return ret;
}
@@ -1728,13 +1180,8 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return;
- esw_vport_disable_egress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
- vport->egress.legacy.drop_counter = NULL;
-
- esw_vport_disable_legacy_ingress_acl(esw, vport);
- mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
- vport->ingress.legacy.drop_counter = NULL;
+ esw_acl_egress_lgcy_cleanup(esw, vport);
+ esw_acl_ingress_lgcy_cleanup(esw, vport);
}
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
@@ -1901,7 +1348,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
if (!err)
return out;
@@ -2280,7 +1727,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->offloads.mod_hdr.lock);
hash_init(esw->offloads.mod_hdr.hlist);
+ mutex_init(&esw->offloads.decap_tbl_lock);
+ hash_init(esw->offloads.decap_tbl);
atomic64_set(&esw->offloads.num_flows, 0);
+ ida_init(&esw->offloads.vport_metadata_ida);
mutex_init(&esw->state_lock);
mutex_init(&esw->mode_lock);
@@ -2319,8 +1769,10 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
+ ida_destroy(&esw->offloads.vport_metadata_ida);
mutex_destroy(&esw->offloads.mod_hdr.lock);
mutex_destroy(&esw->offloads.encap_tbl_lock);
+ mutex_destroy(&esw->offloads.decap_tbl_lock);
kfree(esw->vports);
kfree(esw);
}
@@ -2363,7 +1815,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
ether_addr_copy(evport->info.mac, mac);
evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
unlock:
mutex_unlock(&esw->state_lock);
@@ -2445,10 +1897,10 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
return err;
- err = esw_vport_egress_config(esw, evport);
+ err = esw_acl_egress_lgcy_setup(esw, evport);
}
return err;
@@ -2490,7 +1942,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
- err = esw_vport_ingress_config(esw, evport);
+ err = esw_acl_ingress_lgcy_setup(esw, evport);
if (err)
evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);
@@ -2749,7 +2201,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!vport->enabled)
goto unlock;
- if (vport->egress.legacy.drop_counter)
+ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
@@ -2783,8 +2235,8 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_vport_drop_stats stats = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
+ struct mlx5_vport_drop_stats stats = {};
int err = 0;
u32 *out;
@@ -2801,7 +2253,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
if (err)
goto free_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c1848b57f61c..a5175e98c0b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -99,13 +99,19 @@ struct vport_ingress {
struct vport_egress {
struct mlx5_flow_table *acl;
- struct mlx5_flow_group *allowed_vlans_grp;
- struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
- struct {
- struct mlx5_flow_handle *drop_rule;
- struct mlx5_fc *drop_counter;
- } legacy;
+ struct mlx5_flow_group *vlan_grp;
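+ /* Legacy and offloads modes are mutually exclusive, so their per-mode
+ * objects can share storage.
+ */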
+ union {
+ struct {
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+ } legacy;
+ struct {
+ struct mlx5_flow_group *fwd_grp;
+ struct mlx5_flow_handle *fwd_rule;
+ } offloads;
+ };
};
struct mlx5_vport_drop_stats {
@@ -143,6 +149,8 @@ struct mlx5_vport {
struct vport_ingress ingress;
struct vport_egress egress;
+ u32 default_metadata;
+ u32 metadata;
struct mlx5_vport_info info;
@@ -209,6 +217,8 @@ struct mlx5_esw_offload {
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
+ struct mutex decap_tbl_lock; /* protects decap_tbl */
+ DECLARE_HASHTABLE(decap_tbl, 8);
struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
@@ -216,6 +226,7 @@ struct mlx5_esw_offload {
u8 inline_mode;
atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
+ struct ida vport_metadata_ida;
};
/* E-Switch MC FDB table hash node */
@@ -283,18 +294,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
-void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- int table_size);
-void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
-void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
-void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport);
+
+u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
+void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
+
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
u32 rate_mbps);
@@ -329,11 +332,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
- bool other_vport,
- void *out, int outlen);
+ bool other_vport, void *in);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -436,6 +435,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_flow_table *fdb;
struct mlx5_flow_table *dest_ft;
struct mlx5_ct_attr ct_attr;
+ struct mlx5_pkt_reformat *decap_pkt_reformat;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
@@ -459,10 +459,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
-int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u16 vlan_id, u32 flow_action);
-
static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
return esw->qos.enabled;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 5d9def18ae3a..060354bb211a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -31,12 +31,14 @@
*/
#include <linux/etherdevice.h>
+#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
@@ -234,13 +236,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
-static bool
-esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
- const struct mlx5_vport *vport)
-{
- return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
- mlx5_eswitch_is_vf_vport(esw, vport->vport));
-}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -366,6 +361,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
}
}
+
+ if (attr->decap_pkt_reformat)
+ flow_act.pkt_reformat = attr->decap_pkt_reformat;
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[i].counter_id = mlx5_fc_id(attr->counter);
@@ -784,7 +783,8 @@ static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
u8 curr, wanted;
int err;
@@ -792,8 +792,9 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
- err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
- out, sizeof(out));
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -808,14 +809,12 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
else
curr &= ~wanted;
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
esw_vport_context.fdb_to_vport_reg_c_id, curr);
-
- MLX5_SET(modify_esw_vport_context_in, in,
+ MLX5_SET(modify_esw_vport_context_in, min,
field_select.fdb_to_vport_reg_c_id, 1);
- err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
- sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
if (!err) {
if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
@@ -1468,7 +1467,7 @@ query_vports:
out:
*mode = mlx5_mode;
return 0;
-}
+}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
@@ -1484,7 +1483,7 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
- u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -1727,7 +1726,9 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5e_tc_clean_fdb_peer_flows(esw);
+#endif
esw_del_fdb_peer_miss_rules(esw);
}
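
The new guard above is needed because mlx5e_tc_clean_fdb_peer_flows() is only compiled when TC offload support is. IS_ENABLED() is the right tool here since it is true for both built-in and modular configurations, and unlike a bare #ifdef it also works inside ordinary C expressions; a minimal illustration:

    /* IS_ENABLED(CONFIG_FOO) expands to 1 for CONFIG_FOO=y or CONFIG_FOO=m,
     * and to 0 otherwise, so it can be used in plain C conditions too.
     */
    if (IS_ENABLED(CONFIG_MLX5_CLS_ACT))
            pr_debug("mlx5: TC offload support compiled in\n");
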
@@ -1845,279 +1846,6 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
-static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) Untagged packets - push prio tag VLAN and modify metadata if
- * required, allow
- * Unmatched traffic is allowed by default
- */
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- /* Untagged packets - push prio tag VLAN, allow */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.vlan[0].ethtype = ETH_P_8021Q;
- flow_act.vlan[0].vid = 0;
- flow_act.vlan[0].prio = 0;
-
- if (vport->ingress.offloads.modify_metadata_rule) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
- }
-
- vport->ingress.allow_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.allow_rule)) {
- err = PTR_ERR(vport->ingress.allow_rule);
- esw_warn(esw->dev,
- "vport[%d] configure ingress untagged allow rule, err(%d)\n",
- vport->vport, err);
- vport->ingress.allow_rule = NULL;
- }
-
- kvfree(spec);
- return err;
-}
-
-static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
- struct mlx5_flow_act flow_act = {};
- int err = 0;
- u32 key;
-
- key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
- key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
-
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field,
- MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- MLX5_SET(set_action_in, action, data, key);
- MLX5_SET(set_action_in, action, offset,
- ESW_SOURCE_PORT_METADATA_OFFSET);
- MLX5_SET(set_action_in, action, length,
- ESW_SOURCE_PORT_METADATA_BITS);
-
- vport->ingress.offloads.modify_metadata =
- mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- 1, action);
- if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
- err = PTR_ERR(vport->ingress.offloads.modify_metadata);
- esw_warn(esw->dev,
- "failed to alloc modify header for vport %d ingress acl (%d)\n",
- vport->vport, err);
- return err;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
- vport->ingress.offloads.modify_metadata_rule =
- mlx5_add_flow_rules(vport->ingress.acl,
- NULL, &flow_act, NULL, 0);
- if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
- err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
- esw_warn(esw->dev,
- "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
- vport->vport, err);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
- vport->ingress.offloads.modify_metadata_rule = NULL;
- }
- return err;
-}
-
-static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- if (vport->ingress.offloads.modify_metadata_rule) {
- mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
- mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
-
- vport->ingress.offloads.modify_metadata_rule = NULL;
- }
-}
-
-static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_group *g;
- void *match_criteria;
- u32 *flow_group_in;
- u32 flow_index = 0;
- int ret = 0;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
- return -ENOMEM;
-
- if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
- /* This group is to hold FTE to match untagged packets when prio_tag
- * is enabled.
- */
- memset(flow_group_in, 0, inlen);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in, match_criteria);
- MLX5_SET(create_flow_group_in, flow_group_in,
- match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- ret = PTR_ERR(g);
- esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
- vport->vport, ret);
- goto prio_tag_err;
- }
- vport->ingress.offloads.metadata_prio_tag_grp = g;
- flow_index++;
- }
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- /* This group holds an FTE with no matches for add metadata for
- * tagged packets, if prio-tag is enabled (as a fallthrough),
- * or all traffic in case prio-tag is disabled.
- */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
-
- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
- if (IS_ERR(g)) {
- ret = PTR_ERR(g);
- esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
- vport->vport, ret);
- goto metadata_err;
- }
- vport->ingress.offloads.metadata_allmatch_grp = g;
- }
-
- kvfree(flow_group_in);
- return 0;
-
-metadata_err:
- if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
- vport->ingress.offloads.metadata_prio_tag_grp = NULL;
- }
-prio_tag_err:
- kvfree(flow_group_in);
- return ret;
-}
-
-static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
-{
- if (vport->ingress.offloads.metadata_allmatch_grp) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
- vport->ingress.offloads.metadata_allmatch_grp = NULL;
- }
-
- if (vport->ingress.offloads.metadata_prio_tag_grp) {
- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
- vport->ingress.offloads.metadata_prio_tag_grp = NULL;
- }
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int num_ftes = 0;
- int err;
-
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- !esw_check_ingress_prio_tag_enabled(esw, vport))
- return 0;
-
- esw_vport_cleanup_ingress_rules(esw, vport);
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- num_ftes++;
- if (esw_check_ingress_prio_tag_enabled(esw, vport))
- num_ftes++;
-
- err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
- if (err) {
- esw_warn(esw->dev,
- "failed to enable ingress acl (%d) on vport[%d]\n",
- err, vport->vport);
- return err;
- }
-
- err = esw_vport_create_ingress_acl_group(esw, vport);
- if (err)
- goto group_err;
-
- esw_debug(esw->dev,
- "vport[%d] configure ingress rules\n", vport->vport);
-
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
- if (err)
- goto metadata_err;
- }
-
- if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
- err = esw_vport_ingress_prio_tag_config(esw, vport);
- if (err)
- goto prio_tag_err;
- }
- return 0;
-
-prio_tag_err:
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
-metadata_err:
- esw_vport_destroy_ingress_acl_group(vport);
-group_err:
- esw_vport_destroy_ingress_acl_table(vport);
- return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- int err;
-
- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
- return 0;
-
- esw_vport_cleanup_egress_rules(esw, vport);
-
- err = esw_vport_enable_egress_acl(esw, vport);
- if (err)
- return err;
-
- /* For prio tag mode, there is only 1 FTEs:
- * 1) prio tag packets - pop the prio tag VLAN, allow
- * Unmatched traffic is allowed by default
- */
- esw_debug(esw->dev,
- "vport[%d] configure prio tag egress rules\n", vport->vport);
-
- /* prio tag vlan rule - pop it so VF receives untagged packets */
- err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
- MLX5_FLOW_CONTEXT_ACTION_ALLOW);
- if (err)
- esw_vport_disable_egress_acl(esw, vport);
-
- return err;
-}
-
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
@@ -2150,25 +1878,83 @@ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
esw_check_vport_match_metadata_supported(esw);
}
+u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
+{
+ u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
+ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
+ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ u32 start;
+ u32 end;
+ int id;
+
+ /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
+ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
+
+ /* Trim vhca_id to ESW_VHCA_ID_BITS */
+ vhca_id &= vhca_id_mask;
+
+ start = (vhca_id << ESW_VPORT_BITS);
+ end = start + num_vports;
+ if (!vhca_id)
+ start += 1; /* zero is reserved/invalid metadata */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);
+
+ return (id < 0) ? 0 : id;
+}
+
+void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
+{
+ ida_free(&esw->offloads.vport_metadata_ida, metadata);
+}
+
+static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ return 0;
+
+ vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+ vport->metadata = vport->default_metadata;
+ return vport->metadata ? 0 : -ENOSPC;
+}
+
+static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ return;
+
+ WARN_ON(vport->metadata != vport->default_metadata);
+ mlx5_esw_match_metadata_free(esw, vport->default_metadata);
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_vport_ingress_config(esw, vport);
+ err = esw_offloads_vport_metadata_setup(esw, vport);
if (err)
- return err;
+ goto metadata_err;
+
+ err = esw_acl_ingress_ofld_setup(esw, vport);
+ if (err)
+ goto ingress_err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_vport_egress_config(esw, vport);
- if (err) {
- esw_vport_cleanup_ingress_rules(esw, vport);
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_destroy_ingress_acl_group(vport);
- esw_vport_destroy_ingress_acl_table(vport);
- }
+ err = esw_acl_egress_ofld_setup(esw, vport);
+ if (err)
+ goto egress_err;
}
+
+ return 0;
+
+egress_err:
+ esw_acl_ingress_ofld_cleanup(esw, vport);
+ingress_err:
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+metadata_err:
return err;
}
@@ -2176,11 +1962,9 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- esw_vport_disable_egress_acl(esw, vport);
- esw_vport_cleanup_ingress_rules(esw, vport);
- esw_vport_del_ingress_acl_modify_metadata(esw, vport);
- esw_vport_destroy_ingress_acl_group(vport);
- esw_vport_destroy_ingress_acl_table(vport);
+ esw_acl_egress_ofld_cleanup(vport);
+ esw_acl_ingress_ofld_cleanup(esw, vport);
+ esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -2846,38 +2630,11 @@ EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
- u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
- u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
- u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- u32 val;
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
- /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
- WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
-
- /* Trim vhca_id to ESW_VHCA_ID_BITS */
- vhca_id &= vhca_id_mask;
-
- /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
- * don't overlap with VF numbers, and themselves, after trimming.
- */
- WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
- vport_num_mask - 1);
- WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
- vport_num_mask - 1);
- WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
- (MLX5_VPORT_ECPF & vport_num_mask));
-
- /* Make sure that the VF vport_num fits ESW_VPORT_BITS and don't
- * overlap with pf and ecpf.
- */
- if (vport_num != MLX5_VPORT_UPLINK &&
- vport_num != MLX5_VPORT_ECPF)
- WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
-
- /* We can now trim vport_num to ESW_VPORT_BITS */
- vport_num &= vport_num_mask;
+ if (WARN_ON_ONCE(IS_ERR(vport)))
+ return 0;
- val = (vhca_id << ESW_VPORT_BITS) | vport_num;
- return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
+ return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
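
After this rewrite, mlx5_eswitch_get_vport_metadata_for_match() simply shifts the vport's IDA-allocated metadata into the high bits of reg_c_0; the vhca_id/vport packing happens once, at allocation time, in mlx5_esw_match_metadata_alloc(). A sketch of how such a match is typically built, following the driver's own source-port matching pattern (spec, esw and vport_num are assumed locals):

    void *misc2;

    misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                         misc_parameters_2);
    MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
             mlx5_eswitch_get_vport_metadata_mask());

    misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
    MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
             mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
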
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index c0fd2212e890..9a37077152aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/etherdevice.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
@@ -143,15 +142,15 @@ int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
int mlx5_fpga_create_qp(struct mlx5_core_dev *dev, void *fpga_qpc,
u32 *fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {};
int ret;
MLX5_SET(fpga_create_qp_in, in, opcode, MLX5_CMD_OP_FPGA_CREATE_QP);
memcpy(MLX5_ADDR_OF(fpga_create_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_create_qp_in, fpga_qpc));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_create_qp, in, out);
if (ret)
return ret;
@@ -165,8 +164,7 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
enum mlx5_fpga_qpc_field_select fields,
void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_modify_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_modify_qp_in)] = {};
MLX5_SET(fpga_modify_qp_in, in, opcode, MLX5_CMD_OP_FPGA_MODIFY_QP);
MLX5_SET(fpga_modify_qp_in, in, field_select, fields);
@@ -174,20 +172,20 @@ int mlx5_fpga_modify_qp(struct mlx5_core_dev *dev, u32 fpga_qpn,
memcpy(MLX5_ADDR_OF(fpga_modify_qp_in, in, fpga_qpc), fpga_qpc,
MLX5_FLD_SZ_BYTES(fpga_modify_qp_in, fpga_qpc));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_modify_qp, in);
}
int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
u32 fpga_qpn, void *fpga_qpc)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_in, in, opcode, MLX5_CMD_OP_FPGA_QUERY_QP);
MLX5_SET(fpga_query_qp_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp, in, out);
if (ret)
return ret;
@@ -198,20 +196,19 @@ int mlx5_fpga_query_qp(struct mlx5_core_dev *dev,
int mlx5_fpga_destroy_qp(struct mlx5_core_dev *dev, u32 fpga_qpn)
{
- u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_destroy_qp_out)];
+ u32 in[MLX5_ST_SZ_DW(fpga_destroy_qp_in)] = {};
MLX5_SET(fpga_destroy_qp_in, in, opcode, MLX5_CMD_OP_FPGA_DESTROY_QP);
MLX5_SET(fpga_destroy_qp_in, in, fpga_qpn, fpga_qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, fpga_destroy_qp, in);
}
int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
bool clear, struct mlx5_fpga_qp_counters *data)
{
- u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)];
+ u32 out[MLX5_ST_SZ_DW(fpga_query_qp_counters_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(fpga_query_qp_counters_in)] = {};
int ret;
MLX5_SET(fpga_query_qp_counters_in, in, opcode,
@@ -219,7 +216,7 @@ int mlx5_fpga_query_qp_counters(struct mlx5_core_dev *dev, u32 fpga_qpn,
MLX5_SET(fpga_query_qp_counters_in, in, clear, clear);
MLX5_SET(fpga_query_qp_counters_in, in, fpga_qpn, fpga_qpn);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, fpga_query_qp_counters, in, out);
if (ret)
return ret;
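
These hunks also flip array initializers from = {0} to = {}. Both forms zero-initialize every element; the empty-brace spelling is the one preferred across mlx5 and avoids the misreading that only the first element is cleared:

    u32 in[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {};    /* fully zeroed */
    u32 in2[MLX5_ST_SZ_DW(fpga_create_qp_in)] = {0};  /* same result, older style */
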
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 61021133029e..182d3ac3e73f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -165,7 +165,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
MLX5_OPCODE_SEND);
- ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
+ ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8));
conn->qp.sq.pc++;
conn->qp.sq.bufs[ix] = buf;
@@ -362,23 +362,6 @@ static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}
-static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
- mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
-static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
-{
- struct mlx5_fpga_conn *conn;
-
- conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
- mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
unsigned int budget)
{
@@ -493,7 +476,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
@@ -534,8 +516,9 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
unsigned int tx_size, unsigned int rx_size)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
+ u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
void *in = NULL, *qpc;
int err, inlen;
@@ -600,12 +583,13 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
- err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
goto err_sq_bufs;
- conn->qp.mqp.event = mlx5_fpga_conn_event;
- mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
+ conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn);
+ mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn);
goto out;
@@ -658,7 +642,13 @@ static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
- mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
+ struct mlx5_core_dev *dev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn);
+ mlx5_cmd_exec_in(dev, destroy_qp, in);
+
mlx5_fpga_conn_free_recv_bufs(conn);
mlx5_fpga_conn_flush_send_bufs(conn);
kvfree(conn->qp.sq.bufs);
@@ -666,30 +656,29 @@ static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
mlx5_wq_destroy(&conn->qp.wq_ctrl);
}
-static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_core_dev *mdev = conn->fdev->mdev;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
+
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn);
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
- &conn->qp.mqp);
+ return mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
-static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
- mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
+ mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn);
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -700,32 +689,22 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
-static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
@@ -745,33 +724,22 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
-static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
+static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
struct mlx5_fpga_device *fdev = conn->fdev;
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
struct mlx5_core_dev *mdev = fdev->mdev;
- u32 *qpc = NULL;
- u32 opt_mask;
- int err;
+ u32 *qpc;
mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
- qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
- if (!qpc) {
- err = -ENOMEM;
- goto out;
- }
+ qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
@@ -781,17 +749,11 @@ static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, retry_count, 7);
MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
- opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
- &conn->qp.mqp);
- if (err) {
- mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
- goto out;
- }
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT);
-out:
- kfree(qpc);
- return err;
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
@@ -931,7 +893,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
- MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
+ MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn);
MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
@@ -972,19 +934,11 @@ out:
void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
- struct mlx5_fpga_device *fdev = conn->fdev;
- struct mlx5_core_dev *mdev = fdev->mdev;
- int err = 0;
-
conn->qp.active = false;
tasklet_disable(&conn->cq.tasklet);
synchronize_irq(conn->cq.mcq.irqn);
mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
- &conn->qp.mqp);
- if (err)
- mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
mlx5_fpga_conn_destroy_qp(conn);
mlx5_fpga_conn_destroy_cq(conn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
index 634ae10e287b..5116e869a6e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_conn {
int sgid_index;
struct mlx5_wq_qp wq;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
spinlock_t lock; /* Protects all SQ state */
unsigned int pc;
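
The one-line conn.h change is the pivot of this part of the series: struct mlx5_core_qp existed mostly to route QP events through the core resource tracker, and the FPGA connection code only ever consumed its qpn. With a bare u32, every state transition collapses into a direct command, in the same shape as the 2RST conversion above (sketch, assuming the v5.8 IFC helpers):

    static int fpga_conn_qp_to_reset(struct mlx5_core_dev *mdev, u32 qpn)
    {
            u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

            MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
            MLX5_SET(qp_2rst_in, in, qpn, qpn);
            return mlx5_cmd_exec_in(mdev, qp_2rst, in);
    }

This is presumably also why the 2ERR transition and the CQ/QP event stubs go away: with no mlx5_core_qp registration, nothing receives those events any more.
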
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index b794888fa3ba..b463787d6ca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -65,6 +65,7 @@ struct mlx5_fpga_esp_xfrm;
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa;
+ u32 sa_handle;
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
@@ -119,6 +120,8 @@ struct mlx5_fpga_ipsec {
*/
struct rb_root rules_rb;
struct mutex rules_rb_lock; /* rules lock */
+
+ struct ida halloc;
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
@@ -602,7 +605,7 @@ static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
const u32 *match_c,
const u32 *match_v)
{
- u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
+ u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
bool ipv6_flow;
ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);
@@ -666,7 +669,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6)
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
@@ -704,6 +708,17 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
goto exists;
}
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
+ err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+ if (err < 0) {
+ context = ERR_PTR(err);
+ goto exists;
+ }
+
+ sa_ctx->sa_handle = err;
+ if (sa_handle)
+ *sa_handle = sa_ctx->sa_handle;
+ }
/* This is unbounded fpga_xfrm, try to add to hash */
mutex_lock(&fipsec->sa_hash_lock);
@@ -744,7 +759,8 @@ delete_hash:
rhash_sa));
unlock_hash:
mutex_unlock(&fipsec->sa_hash_lock);
-
+ if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
exists:
mutex_unlock(&fpga_xfrm->lock);
kfree(sa_ctx);
@@ -816,7 +832,7 @@ mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
/* create */
return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
saddr, daddr,
- spi, is_ipv6);
+ spi, is_ipv6, NULL);
}
static void
@@ -836,6 +852,10 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
return;
}
+ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
+ MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+
mutex_lock(&fipsec->sa_hash_lock);
WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
rhash_sa));
@@ -1299,6 +1319,8 @@ int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
goto err_destroy_hash;
}
+ ida_init(&fdev->ipsec->halloc);
+
return 0;
err_destroy_hash:
@@ -1331,6 +1353,7 @@ void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
if (!mlx5_fpga_is_ipsec_device(mdev))
return;
+ ida_destroy(&fdev->ipsec->halloc);
destroy_rules_rb(&fdev->ipsec->rules_rb);
rhashtable_destroy(&fdev->ipsec->sa_hash);
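
The new halloc IDA hands out hardware SA handles for decrypt SAs. ida_simple_get(ida, start, end, gfp) returns the smallest free id in [start, end) (an end of 0 means no upper bound) or a negative errno, which is why the code above starts at 1 and tests err < 0. The alloc/free pairing, reduced to its skeleton:

    int id = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);

    if (id < 0)
            return ERR_PTR(id);             /* -ENOMEM or -ENOSPC */
    sa_ctx->sa_handle = id;
    /* ... SA lives ... */
    ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
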
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index 382985e65b48..9ba637f0f0f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -37,6 +37,7 @@
#include "accel/ipsec.h"
#include "fs_cmd.h"
+#ifdef CONFIG_MLX5_FPGA_IPSEC
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
@@ -46,7 +47,8 @@ void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
- const __be32 spi, bool is_ipv6);
+ const __be32 spi, bool is_ipv6,
+ u32 *sa_handle);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
@@ -63,5 +65,17 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+#else
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
-#endif /* __MLX5_FPGA_SADB_H__ */
+static inline const struct mlx5_flow_cmds *
+mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+{
+ return mlx5_fs_cmd_get_default(type);
+}
+
+#endif /* CONFIG_MLX5_FPGA_IPSEC */
+#endif /* __MLX5_FPGA_IPSEC_H__ */
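
The header now follows the standard kernel pattern for optional code: real prototypes under the Kconfig symbol, static inline fallbacks otherwise, so callers like fs_core.c stay #ifdef-free. The generic shape of the idiom (names hypothetical):

    #ifdef CONFIG_FOO
    u32 foo_device_caps(struct foo_dev *fdev);
    #else
    static inline u32 foo_device_caps(struct foo_dev *fdev)
    {
            return 0;       /* feature absent: report no capabilities */
    }
    #endif

Note how the fallback for mlx5_fs_cmd_get_default_ipsec_fpga_cmds() returns the default command set rather than NULL, keeping the caller's logic unconditional.
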
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 22a2ef111514..29b7339ebfa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -194,8 +194,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn)
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn)
{
struct mlx5_fpga_dma_buf *buf;
int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 3b2e37bf76fe..5714cf391d1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -68,7 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
- u64 rcd_sn);
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
+ u32 seq, __be64 rcd_sn);
#endif /* __MLX5_FPGA_TLS_H__ */
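
Retyping the handle and record sequence number as __be32/__be64 encodes the wire format in the prototype: both travel to the FPGA big-endian, and sparse will now warn on any caller that passes native-endian values. A sketch of a sparse-clean call (variable names assumed):

    __be64 rcd_sn = cpu_to_be64(host_rcd_sn);   /* explicit conversion */

    mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
    /* Passing a plain u64 for rcd_sn would now draw an endianness warning. */
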
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 90048697b2ff..465a1076a477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -155,8 +155,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -167,13 +166,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- if (disconnect) {
+ if (disconnect)
MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
- MLX5_SET(set_flow_table_root_in, in, table_id, 0);
- } else {
- MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ else
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
- }
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
@@ -181,7 +177,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
@@ -192,8 +188,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -239,7 +235,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
break;
}
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
if (!err)
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
@@ -249,8 +245,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -262,15 +257,14 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
@@ -310,7 +304,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
}
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
@@ -318,8 +312,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
u32 *in,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
@@ -332,7 +325,7 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
group_id);
@@ -343,8 +336,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
@@ -357,7 +349,7 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
@@ -600,8 +592,7 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
@@ -613,22 +604,22 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_fte, in);
}
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
int err;
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
if (!err)
*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
@@ -641,21 +632,20 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
void *stats;
int err = 0;
@@ -683,11 +673,10 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
@@ -700,7 +689,7 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
@@ -732,7 +721,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_data);
inlen = reformat - (void *)in + size;
- memset(in, 0, inlen);
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
@@ -741,7 +729,6 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat_type, reformat_type);
memcpy(reformat, reformat_data, size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
@@ -753,17 +740,15 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
pkt_reformat->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
@@ -771,7 +756,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
int max_actions, actions_size, inlen, err;
struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
@@ -796,6 +781,10 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
table_type = FS_FT_ESW_INGRESS_ACL;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_RDMA_TX;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -806,7 +795,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
return -EOPNOTSUPP;
}
- actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
+ actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
in = kzalloc(inlen, GFP_KERNEL);
@@ -821,7 +810,6 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
memcpy(actions_in, modify_actions, actions_size);
- memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
@@ -832,17 +820,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
- memset(in, 0, sizeof(in));
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_hdr->id);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
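
fs_cmd.c picks up two semantic changes amid the cleanup: MLX5_FLOW_NAMESPACE_RDMA_TX becomes a legal namespace for modify-header contexts, and the per-action stride macro is renamed to set_add_copy_action_in_auto, reflecting that the union covers set, add and copy actions. A sketch of sizing a two-action array with it (field choices illustrative; data/offset/length elided for brevity):

    u8 actions[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * 2] = {};
    void *act = actions;

    MLX5_SET(set_action_in, act, action_type, MLX5_ACTION_TYPE_SET);
    MLX5_SET(set_action_in, act, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
    act += MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
    MLX5_SET(set_action_in, act, action_type, MLX5_ACTION_TYPE_SET);
    MLX5_SET(set_action_in, act, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);

    modify_hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_RDMA_TX,
                                          2, actions);
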
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9620c8650e13..13e2fb79c21a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -254,7 +254,7 @@ static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
-/* Delete rule (destination) is special case that
+/* Delete rule (destination) is special case that
* requires to lock the FTE for all the deletion process.
*/
static void del_sw_hw_rule(struct fs_node *node);
@@ -379,6 +379,12 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
+static bool is_fwd_next_action(u32 action)
+{
+ return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+}
+
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
int i;
@@ -499,7 +505,7 @@ static void del_sw_hw_rule(struct fs_node *node)
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
trace_mlx5_fs_del_rule(rule);
- if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (is_fwd_next_action(rule->sw_action)) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
mutex_unlock(&rule->dest_attr.ft->lock);
@@ -823,6 +829,36 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
+static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
+ struct mlx5_flow_namespace *ns)
+{
+ struct mlx5_flow_namespace *root_ns = &root->ns;
+ struct fs_prio *iter_prio;
+ struct fs_prio *prio;
+
+ fs_get_obj(prio, ns->node.parent);
+ list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
+ if (iter_prio == prio &&
+ !list_is_last(&prio->node.children, &iter_prio->node.list))
+ return list_next_entry(iter_prio, node.list);
+ }
+ return NULL;
+}
+
+static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+ struct mlx5_flow_act *flow_act)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct fs_prio *prio;
+
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
+ prio = find_fwd_ns_prio(root, ft->ns);
+ else
+ fs_get_obj(prio, ft->node.parent);
+
+ return (prio) ? find_next_chained_ft(prio) : NULL;
+}
+
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
struct fs_prio *prio,
struct mlx5_flow_table *ft)
@@ -973,6 +1009,10 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
mutex_unlock(&old_next_ft->lock);
list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+ if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
+ iter->ft->ns == new_next_ft->ns)
+ continue;
+
err = _mlx5_modify_rule_destination(iter, &dest);
if (err)
pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
@@ -1074,6 +1114,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
+ ft->ns = ns;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -1752,11 +1793,13 @@ skip_search:
list_for_each_entry(iter, match_head, list) {
g = iter->g;
- if (!g->node.active)
- continue;
-
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ if (!g->node.active) {
+ up_write_ref_node(&g->node, false);
+ continue;
+ }
+
err = insert_fte(g, fte);
if (err) {
up_write_ref_node(&g->node, false);
@@ -1896,48 +1939,59 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
static const struct mlx5_flow_spec zero_spec = {};
- struct mlx5_flow_destination gen_dest = {};
+ struct mlx5_flow_destination *gen_dest = NULL;
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
- struct fs_prio *prio;
+ int i;
if (!spec)
spec = &zero_spec;
- fs_get_obj(prio, ft->node.parent);
- if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
- if (!fwd_next_prio_supported(ft))
- return ERR_PTR(-EOPNOTSUPP);
- if (num_dest)
- return ERR_PTR(-EINVAL);
- mutex_lock(&root->chain_lock);
- next_ft = find_next_chained_ft(prio);
- if (next_ft) {
- gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- gen_dest.ft = next_ft;
- dest = &gen_dest;
- num_dest = 1;
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else {
- mutex_unlock(&root->chain_lock);
- return ERR_PTR(-EOPNOTSUPP);
- }
- }
+ if (!is_fwd_next_action(sw_action))
+ return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
- handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+ if (!fwd_next_prio_supported(ft))
+ return ERR_PTR(-EOPNOTSUPP);
- if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
- if (!IS_ERR_OR_NULL(handle) &&
- (list_empty(&handle->rule[0]->next_ft))) {
- mutex_lock(&next_ft->lock);
- list_add(&handle->rule[0]->next_ft,
- &next_ft->fwd_rules);
- mutex_unlock(&next_ft->lock);
- handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- }
- mutex_unlock(&root->chain_lock);
- }
+ mutex_lock(&root->chain_lock);
+ next_ft = find_next_fwd_ft(ft, flow_act);
+ if (!next_ft) {
+ handle = ERR_PTR(-EOPNOTSUPP);
+ goto unlock;
+ }
+
+ gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
+ GFP_KERNEL);
+ if (!gen_dest) {
+ handle = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+ for (i = 0; i < num_dest; i++)
+ gen_dest[i] = dest[i];
+ gen_dest[i].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ gen_dest[i].ft = next_ft;
+ dest = gen_dest;
+ num_dest++;
+ flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+ if (IS_ERR(handle))
+ goto unlock;
+
+ if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
+ mutex_lock(&next_ft->lock);
+ list_add(&handle->rule[num_dest - 1]->next_ft,
+ &next_ft->fwd_rules);
+ mutex_unlock(&next_ft->lock);
+ handle->rule[num_dest - 1]->sw_action = sw_action;
+ handle->rule[num_dest - 1]->ft = ft;
+ }
+unlock:
+ mutex_unlock(&root->chain_lock);
+ kfree(gen_dest);
return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
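Note: the rewrite above replaces the single stack destination with a heap-allocated copy that appends one forward-to-table entry, so a FWD_NEXT_* action can now coexist with caller-supplied destinations. A standalone C sketch of the copy-and-append pattern (hypothetical types, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dest { int type; int table_id; };	/* stand-in for mlx5_flow_destination */

/* Copy the caller's destination array and append one extra entry. */
static struct dest *append_dest(const struct dest *src, int n, struct dest extra)
{
	struct dest *dst = calloc(n + 1, sizeof(*dst));

	if (!dst)
		return NULL;
	memcpy(dst, src, n * sizeof(*src));
	dst[n] = extra;
	return dst;
}

int main(void)
{
	struct dest in[] = { { 1, 10 }, { 1, 11 } };
	struct dest *out = append_dest(in, 2, (struct dest){ 2, 99 });

	for (int i = 0; out && i < 3; i++)
		printf("dest %d: type=%d table=%d\n", i, out[i].type, out[i].table_id);
	free(out);
	return 0;
}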
@@ -2367,7 +2421,7 @@ static struct mlx5_flow_root_namespace
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_namespace *ns;
- if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
(table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
@@ -2951,7 +3005,8 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 508108c58dae..825b662f809b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -138,6 +138,7 @@ struct fs_node {
struct mlx5_flow_rule {
struct fs_node node;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_destination dest_attr;
/* next_ft should be accessed under chain_lock and only if
* destination type is FWD_NEXT_FT.
@@ -175,6 +176,7 @@ struct mlx5_flow_table {
u32 flags;
struct rhltable fgs_hash;
enum mlx5_flow_table_miss_action def_miss_action;
+ struct mlx5_flow_namespace *ns;
};
struct mlx5_ft_underlay_qp {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 90e3d0233101..a5fbe7343508 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -31,7 +31,6 @@
*/
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
@@ -68,26 +67,19 @@ enum {
MCQI_FW_STORED_VERSION = 1,
};
-static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
- int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
-
- MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(dev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
if (err)
goto out;
@@ -106,13 +98,15 @@ int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+ err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
if (err)
goto out;
@@ -260,8 +254,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
- u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
@@ -272,16 +265,15 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, init_hca, in);
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, teardown_hca, in);
}
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
@@ -316,8 +308,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
- u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
int ret;
@@ -330,7 +322,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
MLX5_SET(teardown_hca_in, in, profile,
MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
if (ret)
return ret;
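Note: the conversions in this file drop the explicit output buffer in favour of mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(), which derive both buffer sizes from the mlx5_ifc structure name by token pasting. A sketch of the assumed wrappers (believed to match include/linux/mlx5/driver.h, shown here for reference):

/* ifc_cmd names the command; ##_in / ##_out pick the matching
 * mlx5_ifc layouts for sizing.
 */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                        \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),            \
		      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                \
	({                                                                \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};              \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);              \
	})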
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f99e1752d4e5..c0cfbab15fe9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 505cf6eeae25..690b822c6152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -160,45 +160,54 @@ int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_core_qp *qp = &ipriv->qp;
- struct mlx5_qp_context *context;
int ret;
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
+ {
+ u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+ u32 *qpc;
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+ qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto err_qp_modify_to_err;
- }
- memset(context, 0, sizeof(*context));
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
+ ipriv->pkey_index);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
+ MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto err_qp_modify_to_err;
+ {
+ u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
+ }
+ {
+ u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
+ ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+ if (ret)
+ goto err_qp_modify_to_err;
}
-
- kfree(context);
return 0;
err_qp_modify_to_err:
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
- kfree(context);
+ {
+ u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};
+
+ MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
+ MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2err, in);
+ }
return ret;
}
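Note: each QP state transition above now builds its command inline from the matching mlx5_ifc layout; only the RST2INIT step carries QP context fields. The three blocks share one shape, which a hypothetical macro (not part of this patch) could capture for the context-free transitions:

/* Hypothetical helper, assuming the mlx5_cmd_exec_in() wrapper: issue a
 * context-free QP state transition, sizing the input from the layout name.
 */
#define MLX5I_QP_TRANSITION(mdev, ifc_cmd, cmd_opcode, qp_num)            \
	({                                                                \
		u32 _in[MLX5_ST_SZ_DW(ifc_cmd##_in)] = {};                \
		MLX5_SET(ifc_cmd##_in, _in, opcode, cmd_opcode);          \
		MLX5_SET(ifc_cmd##_in, _in, qpn, qp_num);                 \
		mlx5_cmd_exec_in(mdev, ifc_cmd, _in);                     \
	})

Usage would read, e.g., MLX5I_QP_TRANSITION(mdev, init2rtr_qp, MLX5_CMD_OP_INIT2RTR_QP, ipriv->qpn).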
@@ -206,30 +215,24 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_qp_context context;
- int err;
+ u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
- err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
- &ipriv->qp);
- if (err)
- mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+ MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
+ MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
+ mlx5_cmd_exec_in(mdev, qp_2rst, in);
}
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- u32 *in = NULL;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
+ struct mlx5i_priv *ipriv = priv->ppriv;
void *addr_path;
int ret = 0;
- int inlen;
void *qpc;
- inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
@@ -240,20 +243,28 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
- ret = mlx5_core_create_qp(mdev, qp, in, inlen);
- if (ret) {
- mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
- goto out;
- }
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
+ if (ret)
+ return ret;
-out:
- kvfree(in);
- return ret;
+ ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ return 0;
}
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
- mlx5_core_destroy_qp(mdev, qp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+}
+
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
+{
+ return mlx5e_refresh_tirs(priv, true, true);
}
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
@@ -273,13 +284,13 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ err = mlx5i_create_underlay_qp(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -288,7 +299,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return 0;
err_destroy_underlay_qp:
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
return err;
}
@@ -297,7 +308,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
struct mlx5i_priv *ipriv = priv->ppriv;
mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
@@ -450,7 +461,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.cleanup_rx = mlx5i_cleanup_rx,
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
- .update_rx = mlx5e_update_nic_rx,
+ .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL, /* mlx5i_update_stats */
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
@@ -500,12 +511,12 @@ int mlx5i_dev_init(struct net_device *dev)
struct mlx5i_priv *ipriv = priv->ppriv;
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
+ dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
+ dev->dev_addr[3] = (ipriv->qpn) & 0xff;
/* Add QPN to net-device mapping to HT */
- mlx5i_pkey_add_qpn(dev ,ipriv->qp.qpn);
+ mlx5i_pkey_add_qpn(dev, ipriv->qpn);
return 0;
}
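Note: a QPN is 24 bits wide, so the three bytes extracted above fully identify the underlay QP inside the hardware address. A standalone check of the packing arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qpn = 0x00ab12cd;	/* QPNs occupy the low 24 bits */
	uint8_t addr[4] = { 0 };

	addr[1] = (qpn >> 16) & 0xff;
	addr[2] = (qpn >> 8) & 0xff;
	addr[3] = qpn & 0xff;

	/* Reassemble and verify the round trip. */
	uint32_t back = (uint32_t)addr[1] << 16 | (uint32_t)addr[2] << 8 | addr[3];

	assert(back == qpn);
	printf("qpn 0x%06x -> %02x:%02x:%02x\n", qpn, addr[1], addr[2], addr[3]);
	return 0;
}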
@@ -532,7 +543,7 @@ void mlx5i_dev_cleanup(struct net_device *dev)
mlx5i_uninit_underlay_qp(priv);
/* Delete QPN to net-device mapping from HT */
- mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
+ mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}
static int mlx5i_open(struct net_device *netdev)
@@ -552,7 +563,7 @@ static int mlx5i_open(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
goto err_reset_qp;
@@ -569,7 +580,7 @@ static int mlx5i_open(struct net_device *netdev)
return 0;
err_remove_fs_underlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
@@ -595,7 +606,7 @@ static int mlx5i_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &epriv->state);
netif_carrier_off(epriv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
mlx5i_uninit_underlay_qp(epriv);
@@ -614,11 +625,12 @@ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
- err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
+ mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
+ err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
if (set_qkey) {
mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
@@ -637,12 +649,13 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
struct mlx5i_priv *ipriv = epriv->ppriv;
int err;
- mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
+ mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
+ gid->raw);
- err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
+ err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
if (err)
mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
- ipriv->qp.qpn, gid->raw);
+ ipriv->qpn, gid->raw);
return err;
}
@@ -655,7 +668,9 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
struct mlx5_ib_ah *mah = to_mah(address);
struct mlx5i_priv *ipriv = epriv->ppriv;
- return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+ mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
+
+ return NETDEV_TX_OK;
}
static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index de7e01a027bb..79071a15c4ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -51,7 +51,7 @@ extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
- struct mlx5_core_qp qp;
+ u32 qpn;
bool sub_interface;
u32 qkey;
u16 pkey_index;
@@ -62,8 +62,8 @@ struct mlx5i_priv {
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
/* Underlay QP create/destroy functions */
-int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
-void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn);
/* Underlay QP state modification init/uninit functions */
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
@@ -92,6 +92,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv);
void mlx5i_cleanup(struct mlx5e_priv *priv);
+int mlx5i_update_nic_rx(struct mlx5e_priv *priv);
+
/* Get child interface nic profile */
const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
@@ -110,19 +112,11 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_data_seg data[];
};
-static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
- struct mlx5i_tx_wqe **wqe,
- u16 pi)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
-
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
-}
+#define MLX5I_SQ_FETCH_WQE(sq, pi) \
+ ((struct mlx5i_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5i_tx_wqe)))
-netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_av *av, u32 dqpn, u32 dqkey,
- bool xmit_more);
+void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
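Note: the open-coded fetch-and-zero removed above is replaced by a cast over the shared mlx5e_fetch_wqe(), which presumably performs the same get-WQE-then-clear sequence for an arbitrary WQE size. A sketch of the assumed shape (not shown in this diff):

/* Assumed helper: fetch the WQE at producer index pi from the cyclic
 * work queue and zero wqe_size bytes of it before use.
 */
static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi,
				    size_t wqe_size)
{
	void *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, wqe_size);
	return wqe;
}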
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 96e64187c089..f70367018862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -204,13 +204,13 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_release_lock;
}
- err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
if (err) {
mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -230,7 +230,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_clear_state_opened_flag:
mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
mlx5i_uninit_underlay_qp(epriv);
err_release_lock:
@@ -253,7 +253,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
- mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -307,23 +307,20 @@ static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
- err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
- if (err) {
+ err = mlx5i_create_underlay_qp(priv);
+ if (err)
mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
- return err;
- }
- return 0;
+ return err;
}
static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
@@ -350,7 +347,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.cleanup_rx = mlx5i_pkey_cleanup_rx,
.enable = NULL,
.disable = NULL,
- .update_rx = mlx5e_update_nic_rx,
+ .update_rx = mlx5i_update_nic_rx,
.update_stats = NULL,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 93052b07c76c..874c70e8cc54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -42,13 +42,12 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
-static DEFINE_MUTEX(lag_mutex);
+static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
@@ -56,14 +55,13 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_lag, in);
}
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
{
- u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
@@ -72,52 +70,29 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
-
- MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
}
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};
MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};
MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
- MLX5_SET(query_cong_statistics_in, in, opcode,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS);
- MLX5_SET(query_cong_statistics_in, in, clear, reset);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev)
{
@@ -232,12 +207,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
- err = mlx5_cmd_destroy_lag(dev0);
+ MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+ err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
if (err) {
if (roce_lag) {
mlx5_core_err(dev0,
@@ -297,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
tracker = ldev->tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -481,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->tracker = tracker;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -525,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -533,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -548,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
@@ -630,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -644,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -658,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return res;
}
@@ -687,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -704,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
- mutex_unlock(&lag_mutex);
+ spin_unlock(&lag_lock);
return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave)
+{
+ struct mlx5_lag *ldev;
+ u8 port = 0;
+
+ spin_lock(&lag_lock);
+ ldev = mlx5_lag_dev_get(dev);
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
+ goto unlock;
+
+ if (ldev->pf[MLX5_LAG_P1].netdev == slave)
+ port = MLX5_LAG_P1;
+ else
+ port = MLX5_LAG_P2;
+
+ port = ldev->v2p_map[port];
+
+unlock:
+ spin_unlock(&lag_lock);
+ return port;
+}
+EXPORT_SYMBOL(mlx5_lag_get_slave_port);
+
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -746,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
- mutex_lock(&lag_mutex);
+ spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
@@ -756,18 +757,23 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
+ spin_unlock(&lag_lock);
for (i = 0; i < num_ports; ++i) {
- ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
+ out);
if (ret)
- goto unlock;
+ goto free;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
-unlock:
- mutex_unlock(&lag_mutex);
+free:
kvfree(out);
return ret;
}
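Note: the lag_mutex-to-spinlock conversion only works because every slow operation has been hoisted out of the critical section; the congestion-counter query above, for instance, now snapshots the device list under the lock and issues firmware commands after dropping it. A standalone sketch of that snapshot-then-work pattern (userspace pthreads, for illustration only):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_spinlock_t lock;
static int shared_state[2] = { 1, 2 };

/* Slow work stands in for a firmware command: must not run under the lock. */
static int slow_query(int dev) { return dev * 10; }

int main(void)
{
	int snapshot[2];
	int total = 0;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&lock);
	memcpy(snapshot, shared_state, sizeof(snapshot));	/* short critical section */
	pthread_spin_unlock(&lock);

	for (int i = 0; i < 2; i++)				/* slow part, lock dropped */
		total += slow_query(snapshot[i]);

	printf("total %d\n", total);
	pthread_spin_destroy(&lock);
	return 0;
}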
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 43f97601b500..ef0706d15a5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -32,6 +32,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
+#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
@@ -66,6 +67,26 @@ enum {
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
+static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts)
+{
+ u32 timer_h, timer_h1, timer_l;
+
+ timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ ptp_read_system_prets(sts);
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
+ timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+ if (timer_h != timer_h1) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
+ timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)timer_l | (u64)timer_h1 << 32;
+}
+
static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
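Note: mlx5_read_internal_timer(), now private to clock.c, reads a 64-bit counter exposed as two 32-bit registers: it samples high, low, high again, and retries the low read if the high word rolled over in between. A self-contained model of the same discipline:

#include <stdint.h>
#include <stdio.h>

static uint32_t hi_reg, lo_reg;		/* stand-ins for the iseg registers */

static uint32_t read_hi(void) { return hi_reg; }
static uint32_t read_lo(void) { return lo_reg; }

/* Read high/low/high; if the high word changed, the low read straddled a
 * carry and must be taken again, paired with the new high word.
 */
static uint64_t read_split_counter(void)
{
	uint32_t h = read_hi();
	uint32_t l = read_lo();
	uint32_t h1 = read_hi();

	if (h != h1)
		l = read_lo();
	return (uint64_t)l | (uint64_t)h1 << 32;
}

int main(void)
{
	hi_reg = 5; lo_reg = 42;
	printf("counter = %llu\n", (unsigned long long)read_split_counter());
	return 0;
}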
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 6cbccba56f70..3d5e57ff558c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -90,7 +90,8 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
}
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
- u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id)
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id)
{
u32 num_blocks = DIV_ROUND_UP_ULL(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
@@ -99,6 +100,7 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
unsigned long *block_map;
u64 icm_start_addr;
u32 log_icm_size;
+ u64 align_mask;
u32 max_blocks;
u64 block_idx;
void *sw_icm;
@@ -136,11 +138,14 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
return -EOPNOTSUPP;
max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ if (log_alignment < MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+ log_alignment = MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+ align_mask = BIT(log_alignment - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) - 1;
+
spin_lock(&dm->lock);
- block_idx = bitmap_find_next_zero_area(block_map,
- max_blocks,
- 0,
- num_blocks, 0);
+ block_idx = bitmap_find_next_zero_area(block_map, max_blocks, 0,
+ num_blocks, align_mask);
if (block_idx < max_blocks)
bitmap_set(block_map,
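Note: the alignment support above clamps log_alignment to the SW ICM block size and converts the remainder into the align_mask that bitmap_find_next_zero_area() expects (a mask of low bits the start index must have clear). A quick check of the arithmetic, with example values that are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned log_block = 12;	/* e.g. 4 KiB ICM blocks */
	unsigned log_align = 16;	/* caller wants 64 KiB alignment */

	if (log_align < log_block)
		log_align = log_block;	/* never finer than one block */

	/* Start index must be a multiple of 2^(log_align - log_block). */
	uint64_t align_mask = (1ULL << (log_align - log_block)) - 1;

	printf("align_mask = 0x%llx (start index multiple of %llu blocks)\n",
	       (unsigned long long)align_mask,
	       (unsigned long long)(align_mask + 1));
	return 0;
}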
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4be4d2d36218..4aaca7400fb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -27,7 +27,6 @@ struct mlx5_eq {
__be32 __iomem *doorbell;
u32 cons_index;
struct mlx5_frag_buf buf;
- int size;
unsigned int vecidx;
unsigned int irqn;
u8 eqn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 7722a3f9bb68..a68738c8f4bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -124,8 +124,7 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
- u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
char *addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, in_addr,
source_l3_address);
@@ -153,6 +152,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_roce_address, in);
}
EXPORT_SYMBOL(mlx5_core_roce_gid_set);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 3118e8d66407..fd8449ff9e17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -40,8 +40,7 @@
/* HW L2 Table (MPFS) management */
static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
{
- u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
u8 *in_mac_addr;
MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
@@ -50,17 +49,16 @@ static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac)
in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
}
static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
- u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {};
MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, delete_l2_table_entry, in);
}
/* UC L2 table hash node */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 48b5c847b642..e042e0924079 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -4,7 +4,6 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/port_tun.h"
@@ -145,11 +144,11 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
int reformat_type)
{
- /* the default is error for unknown (non VXLAN/GRE tunnel types) */
int err = -EOPNOTSUPP;
mutex_lock(&tun_entropy->lock);
- if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN &&
+ if ((reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN ||
+ reformat_type == MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL) &&
tun_entropy->enabled) {
/* in case entropy calculation is enabled for all tunneling
* types, it is ok for VXLAN, so approve.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 148b55c3db7a..82c766a95165 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -60,24 +60,22 @@ static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, add_vxlan_udp_dport, in);
}
static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
- u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {};
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
}
static struct mlx5_vxlan_port*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 17f818a54090..df46b1fce3a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -177,6 +177,11 @@ static struct mlx5_profile profile[] = {
#define FW_PRE_INIT_TIMEOUT_MILI 120000
#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
+static int fw_initializing(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->initializing) >> 31;
+}
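Note: the initializing word is read big-endian, and its most significant bit flags that firmware initialization is still in progress, so the helper reduces to a single shift. A host-order model of the extraction:

#include <stdint.h>
#include <stdio.h>

/* Model: after the big-endian load (ioread32be), bit 31 of the
 * host-order value is the "initializing" flag.
 */
static int fw_initializing_bit(uint32_t initializing_word)
{
	return initializing_word >> 31;
}

int main(void)
{
	printf("busy:  %d\n", fw_initializing_bit(0x80000000u));
	printf("ready: %d\n", fw_initializing_bit(0x00000001u));
	return 0;
}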
+
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
{
@@ -206,8 +211,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
- u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
- u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
+ u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
int remaining_size = driver_ver_sz;
char *string;
@@ -234,7 +238,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
MLX5_SET(set_driver_version_in, in, opcode,
MLX5_CMD_OP_SET_DRIVER_VERSION);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, set_driver_version, in);
}
static int set_dma_caps(struct pci_dev *pdev)
@@ -366,7 +370,7 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@@ -407,30 +411,25 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
-static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
+static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
- u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
-
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
- return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
-static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx;
void *set_hca_cap;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
int req_endianness;
int err;
- if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
- if (err)
- return err;
- } else {
+ if (!MLX5_CAP_GEN(dev, atomic))
return 0;
- }
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+ if (err)
+ return err;
req_endianness =
MLX5_CAP_ATOMIC(dev,
@@ -439,27 +438,18 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
- err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
-
- kfree(set_ctx);
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}
-static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
+static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
- void *set_ctx;
- int set_sz;
bool do_set = false;
int err;
@@ -471,11 +461,6 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
if (err)
return err;
- set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- return -ENOMEM;
-
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
MLX5_ST_SZ_BYTES(odp_cap));
@@ -504,30 +489,21 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
- if (do_set)
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_ODP);
-
- kfree(set_ctx);
+ if (!do_set)
+ return 0;
- return err;
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
-static int handle_hca_cap(struct mlx5_core_dev *dev)
+static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- int err = -ENOMEM;
- int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *set_hca_cap;
-
- set_ctx = kzalloc(set_sz, GFP_KERNEL);
- if (!set_ctx)
- goto query_ex;
+ int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
- goto query_ex;
+ return err;
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
@@ -578,37 +554,76 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
num_vhca_ports,
MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
- err = set_caps(dev, set_ctx, set_sz,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);
-query_ex:
- kfree(set_ctx);
+ return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+}
+
+static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, roce))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
+ !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
+ MLX5_ST_SZ_BYTES(roce_cap));
+ MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_ctx;
int err;
- err = handle_hca_cap(dev);
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ err = handle_hca_cap(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap failed\n");
goto out;
}
- err = handle_hca_cap_atomic(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_atomic(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto out;
}
- err = handle_hca_cap_odp(dev);
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_odp(dev, set_ctx);
if (err) {
mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_roce(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
+ goto out;
+ }
+
out:
+ kfree(set_ctx);
return err;
}
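Note: set_hca_cap() now allocates one set_hca_cap_in buffer and lends it to each handler, clearing it between stages so every handler starts from a zeroed command. The reuse pattern in miniature (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CTX_SZ 64

static int stage(void *ctx, int id)
{
	/* Each stage assumes a zeroed buffer and writes its own payload. */
	memset(ctx, id, 8);
	printf("stage %d ran\n", id);
	return 0;
}

int main(void)
{
	void *ctx = calloc(1, CTX_SZ);
	int err = 0;

	if (!ctx)
		return 1;
	for (int id = 1; id <= 3 && !err; id++) {
		memset(ctx, 0, CTX_SZ);		/* reset between consumers */
		err = stage(ctx, id);
	}
	free(ctx);
	return err;
}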
@@ -642,58 +657,35 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, enable_hca, in);
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
MLX5_SET(enable_hca_in, in, embedded_cpu_function,
dev->caps.embedded_cpu);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts)
-{
- u32 timer_h, timer_h1, timer_l;
-
- timer_h = ioread32be(&dev->iseg->internal_timer_h);
- ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
- ptp_read_system_postts(sts);
- timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
- if (timer_h != timer_h1) {
- /* wrap around */
- ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
- ptp_read_system_postts(sts);
- }
-
- return (u64)timer_l | (u64)timer_h1 << 32;
+ return mlx5_cmd_exec_in(dev, disable_hca, in);
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
- u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
- u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
u32 sup_issi;
int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
- err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
- query_out, sizeof(query_out));
+ err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
u32 syndrome;
u8 status;
@@ -713,13 +705,11 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
- u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
- u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
- err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
- set_out, sizeof(set_out));
+ err = mlx5_cmd_exec_in(dev, set_issi, set_in);
if (err) {
mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
err);
@@ -782,7 +772,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
-
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
return 0;
err_clr_master:
@@ -836,8 +826,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_cq_debugfs_init(dev);
- mlx5_init_qp_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +884,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
err_eq_cleanup:
@@ -924,7 +911,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
@@ -1184,7 +1170,6 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
{
int err = 0;
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1221,10 +1206,9 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
mlx5_register_device(dev);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
- mutex_unlock(&dev->intf_state_mutex);
- return err;
+ mutex_unlock(&dev->intf_state_mutex);
+ return 0;
err_devlink_reg:
mlx5_unload(dev);
@@ -1235,17 +1219,15 @@ function_teardown:
mlx5_function_teardown(dev, boot);
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+out:
mutex_unlock(&dev->intf_state_mutex);
-
return err;
}
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
- if (cleanup) {
+ if (cleanup)
mlx5_unregister_device(dev);
- mlx5_drain_health_wq(dev);
- }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1295,7 +1277,7 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
if (!priv->dbg_root) {
dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
- return -ENOMEM;
+ goto err_dbg_root;
}
err = mlx5_health_init(dev);
@@ -1312,15 +1294,27 @@ err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
debugfs_remove(dev->priv.dbg_root);
-
+err_dbg_root:
+ mutex_destroy(&priv->pgdir_mutex);
+ mutex_destroy(&priv->alloc_mutex);
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
return err;
}
static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
+ struct mlx5_priv *priv = &dev->priv;
+
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
+ mutex_destroy(&priv->pgdir_mutex);
+ mutex_destroy(&priv->alloc_mutex);
+ mutex_destroy(&priv->bfregs.wc_head.lock);
+ mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->intf_state_mutex);
}
#define MLX5_IB_MOD "mlx5_ib"
@@ -1388,6 +1382,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
+ mlx5_drain_health_wq(dev);
mlx5_unload_one(dev, true);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index ba2b09cc192f..e019d68062d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -33,34 +33,31 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a8fb43a85d1d..fc1649dac11b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,7 +38,6 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
@@ -141,8 +140,6 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts);
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 366f2cbfc6db..9eb51f06d3ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -33,14 +33,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen)
{
- u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+ u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
void *mkc;
int err;
@@ -66,19 +65,18 @@ EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey)
{
- u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {};
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
@@ -100,8 +98,8 @@ static inline u32 mlx5_get_psv(u32 *out, int psv_index)
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
- u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
@@ -111,7 +109,7 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
MLX5_SET(create_psv_in, in, pd, pdn);
MLX5_SET(create_psv_in, in, num_psv, npsvs);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_psv, in, out);
if (err)
return err;
@@ -124,11 +122,10 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
- u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {};
MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, in, psvn, psv_num);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_psv, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 91bd258ecf1b..5ddd18639a1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"
@@ -51,6 +50,7 @@ struct mlx5_pages_req {
u8 ec_function;
s32 npages;
struct work_struct work;
+ u8 release_all;
};
struct fw_page {
@@ -136,8 +136,8 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
- u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
int err;
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
@@ -146,7 +146,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
if (err)
return err;
@@ -156,15 +156,21 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
- struct fw_page *fp;
+ struct fw_page *fp = NULL;
+ struct fw_page *iter;
unsigned n;
- if (list_empty(&dev->priv.free_list))
+ list_for_each_entry(iter, &dev->priv.free_list, list) {
+ if (iter->func_id != func_id)
+ continue;
+ fp = iter;
+ }
+
+ if (list_empty(&dev->priv.free_list) || !fp)
return -ENOMEM;
- fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
mlx5_core_warn(dev, "alloc 4k bug\n");
@@ -182,6 +188,18 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
+static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
+ bool in_free_list)
+{
+ rb_erase(&fwp->rb_node, &dev->priv.page_root);
+ if (in_free_list)
+ list_del(&fwp->list);
+ dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(fwp->page);
+ kfree(fwp);
+}
+
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
struct fw_page *fwp;
@@ -189,24 +207,16 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
if (!fwp) {
- mlx5_core_warn(dev, "page not found\n");
+ mlx5_core_warn_rl(dev, "page not found\n");
return;
}
-
n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
- if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
- rb_erase(&fwp->rb_node, &dev->priv.page_root);
- if (fwp->free_count != 1)
- list_del(&fwp->list);
- dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(fwp->page);
- kfree(fwp);
- } else if (fwp->free_count == 1) {
+ if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
+ free_fwp(dev, fwp, fwp->free_count != 1);
+ else if (fwp->free_count == 1)
list_add(&fwp->list, &dev->priv.free_list);
- }
}
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
@@ -257,8 +267,7 @@ err_mapping:
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
- u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int err;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -266,7 +275,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, manage_pages, in);
if (err)
mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
func_id, err);
@@ -292,7 +301,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
for (i = 0; i < npages; i++) {
retry:
- err = alloc_4k(dev, &addr);
+ err = alloc_4k(dev, &addr, func_id);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, func_id);
@@ -339,6 +348,33 @@ out_free:
return err;
}
+static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+ bool ec_function)
+{
+ struct rb_node *p;
+ int npages = 0;
+
+ p = rb_first(&dev->priv.page_root);
+ while (p) {
+ struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
+
+ p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+ npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
+ free_fwp(dev, fwp, fwp->free_count);
+ }
+
+ dev->priv.fw_pages -= npages;
+ if (func_id)
+ dev->priv.vfs_pages -= npages;
+ else if (mlx5_core_is_ecpf(dev) && !ec_function)
+ dev->priv.peer_pf_pages -= npages;
+
+ mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
+ npages, ec_function, func_id);
+}
+
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
@@ -374,7 +410,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed, bool ec_function)
{
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
- u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u32 *out;
int err;
@@ -432,7 +468,9 @@ static void pages_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = req->dev;
int err = 0;
- if (req->npages < 0)
+ if (req->release_all)
+ release_all_pages(dev, req->func_id, req->ec_function);
+ else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
req->ec_function);
else if (req->npages > 0)
@@ -447,6 +485,7 @@ static void pages_work_handler(struct work_struct *work)
enum {
EC_FUNCTION_MASK = 0x8000,
+ RELEASE_ALL_PAGES_MASK = 0x4000,
};
static int req_pages_handler(struct notifier_block *nb,
@@ -457,6 +496,7 @@ static int req_pages_handler(struct notifier_block *nb,
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
+ bool release_all;
u16 func_id;
s32 npages;
@@ -467,8 +507,10 @@ static int req_pages_handler(struct notifier_block *nb,
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
- mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
- func_id, npages);
+ release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
+ RELEASE_ALL_PAGES_MASK;
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
+ func_id, npages, release_all);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
@@ -479,6 +521,7 @@ static int req_pages_handler(struct notifier_block *nb,
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
+ req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
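Two independent changes land in pagealloc.c: alloc_4k() now only hands out 4K sub-pages from firmware pages originally granted to the same func_id, and a new RELEASE_ALL_PAGES event flag lets firmware ask for all of a function's pages back in one request instead of iterating reclaim_pages(). A sketch of how the req_pages EQE flag word is decoded after this patch, using the masks defined above (assumption: the remaining bits stay reserved):

    static void decode_req_pages_flags(__be16 ec_word, bool *ec_function,
                                       bool *release_all)
    {
            u16 v = be16_to_cpu(ec_word);

            *ec_function = v & EC_FUNCTION_MASK;       /* bit 15 */
            *release_all = v & RELEASE_ALL_PAGES_MASK; /* bit 14 */
    }

release_all_pages() then walks the page rbtree and frees every fw_page belonging to that func_id, adjusting the fw_pages/vfs_pages/peer_pf_pages accounting in bulk.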
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index bd830d8d6c5f..aabc53ad8bdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -33,17 +33,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
int err;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
@@ -52,11 +51,10 @@ EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index cc262b30aed5..9f829e68fc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -763,24 +763,23 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
- u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {};
int err;
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_wol_rol, in, out);
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
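pd.c and port.c show the same conversion plus a style pass: unused out[] buffers go away and the remaining locals are reordered into netdev's reverse-xmas-tree convention (longest declaration first), which is why mlx5_query_port_wol() now declares out before in:

    u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {};
    u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {};
    int err;

Purely cosmetic, but it keeps the mlx5 files consistent as the series touches nearly every command call site.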
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
deleted file mode 100644
index c3aea4cc2fff..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ /dev/null
@@ -1,737 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/gfp.h>
-#include <linux/export.h>
-#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
-
-#include "mlx5_core.h"
-#include "lib/eq.h"
-
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct);
-
-static struct mlx5_core_rsc_common *
-mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
-{
- struct mlx5_core_rsc_common *common;
- unsigned long flags;
-
- spin_lock_irqsave(&table->lock, flags);
-
- common = radix_tree_lookup(&table->tree, rsn);
- if (common)
- refcount_inc(&common->refcount);
-
- spin_unlock_irqrestore(&table->lock, flags);
-
- return common;
-}
-
-void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
-{
- if (refcount_dec_and_test(&common->refcount))
- complete(&common->free);
-}
-
-static u64 qp_allowed_event_types(void)
-{
- u64 mask;
-
- mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
- BIT(MLX5_EVENT_TYPE_COMM_EST) |
- BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
- BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
- BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
- BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
- BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
- BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
-
- return mask;
-}
-
-static u64 rq_allowed_event_types(void)
-{
- u64 mask;
-
- mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
- BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
-
- return mask;
-}
-
-static u64 sq_allowed_event_types(void)
-{
- return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
-}
-
-static u64 dct_allowed_event_types(void)
-{
- return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
-}
-
-static bool is_event_type_allowed(int rsc_type, int event_type)
-{
- switch (rsc_type) {
- case MLX5_EVENT_QUEUE_TYPE_QP:
- return BIT(event_type) & qp_allowed_event_types();
- case MLX5_EVENT_QUEUE_TYPE_RQ:
- return BIT(event_type) & rq_allowed_event_types();
- case MLX5_EVENT_QUEUE_TYPE_SQ:
- return BIT(event_type) & sq_allowed_event_types();
- case MLX5_EVENT_QUEUE_TYPE_DCT:
- return BIT(event_type) & dct_allowed_event_types();
- default:
- WARN(1, "Event arrived for unknown resource type");
- return false;
- }
-}
-
-static int rsc_event_notifier(struct notifier_block *nb,
- unsigned long type, void *data)
-{
- struct mlx5_core_rsc_common *common;
- struct mlx5_qp_table *table;
- struct mlx5_core_dev *dev;
- struct mlx5_core_dct *dct;
- u8 event_type = (u8)type;
- struct mlx5_core_qp *qp;
- struct mlx5_priv *priv;
- struct mlx5_eqe *eqe;
- u32 rsn;
-
- switch (event_type) {
- case MLX5_EVENT_TYPE_DCT_DRAINED:
- eqe = data;
- rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
- rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
- break;
- case MLX5_EVENT_TYPE_PATH_MIG:
- case MLX5_EVENT_TYPE_COMM_EST:
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- eqe = data;
- rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- table = container_of(nb, struct mlx5_qp_table, nb);
- priv = container_of(table, struct mlx5_priv, qp_table);
- dev = container_of(priv, struct mlx5_core_dev, priv);
-
- mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
-
- common = mlx5_get_rsc(table, rsn);
- if (!common) {
- mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
- return NOTIFY_OK;
- }
-
- if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
- mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
- event_type, rsn);
- goto out;
- }
-
- switch (common->res) {
- case MLX5_RES_QP:
- case MLX5_RES_RQ:
- case MLX5_RES_SQ:
- qp = (struct mlx5_core_qp *)common;
- qp->event(qp, event_type);
- break;
- case MLX5_RES_DCT:
- dct = (struct mlx5_core_dct *)common;
- if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
- complete(&dct->drained);
- break;
- default:
- mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
- }
-out:
- mlx5_core_put_rsc(common);
-
- return NOTIFY_OK;
-}
-
-static int create_resource_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
-{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
- int err;
-
- qp->common.res = rsc_type;
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree,
- qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
- qp);
- spin_unlock_irq(&table->lock);
- if (err)
- return err;
-
- refcount_set(&qp->common.refcount, 1);
- init_completion(&qp->common.free);
- qp->pid = current->pid;
-
- return 0;
-}
-
-static void destroy_resource_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
-{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
- unsigned long flags;
-
- spin_lock_irqsave(&table->lock, flags);
- radix_tree_delete(&table->tree,
- qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
- spin_unlock_irqrestore(&table->lock, flags);
- mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
- wait_for_completion(&qp->common.free);
-}
-
-static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct, bool need_cleanup)
-{
- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
- struct mlx5_core_qp *qp = &dct->mqp;
- int err;
-
- err = mlx5_core_drain_dct(dev, dct);
- if (err) {
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- goto destroy;
- } else {
- mlx5_core_warn(
- dev, "failed drain DCT 0x%x with error 0x%x\n",
- qp->qpn, err);
- return err;
- }
- }
- wait_for_completion(&dct->drained);
-destroy:
- if (need_cleanup)
- destroy_resource_common(dev, &dct->mqp);
- MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
- MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
- MLX5_SET(destroy_dct_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
- return err;
-}
-
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct,
- u32 *in, int inlen,
- u32 *out, int outlen)
-{
- struct mlx5_core_qp *qp = &dct->mqp;
- int err;
-
- init_completion(&dct->drained);
- MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
-
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
- if (err) {
- mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
- return err;
- }
-
- qp->qpn = MLX5_GET(create_dct_out, out, dctn);
- qp->uid = MLX5_GET(create_dct_in, in, uid);
- err = create_resource_common(dev, qp, MLX5_RES_DCT);
- if (err)
- goto err_cmd;
-
- return 0;
-err_cmd:
- _mlx5_core_destroy_dct(dev, dct, false);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
-
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in, int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
- u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
- u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
- int err;
-
- MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
-
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (err)
- return err;
-
- qp->uid = MLX5_GET(create_qp_in, in, uid);
- qp->qpn = MLX5_GET(create_qp_out, out, qpn);
- mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
-
- err = create_resource_common(dev, qp, MLX5_RES_QP);
- if (err)
- goto err_cmd;
-
- err = mlx5_debug_qp_add(dev, qp);
- if (err)
- mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
- qp->qpn);
-
- atomic_inc(&dev->num_qps);
-
- return 0;
-
-err_cmd:
- memset(din, 0, sizeof(din));
- memset(dout, 0, sizeof(dout));
- MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
- MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
- MLX5_SET(destroy_qp_in, din, uid, qp->uid);
- mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct)
-{
- u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
- struct mlx5_core_qp *qp = &dct->mqp;
-
- MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
- MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
- MLX5_SET(drain_dct_in, in, uid, qp->uid);
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)&out, sizeof(out));
-}
-
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
- struct mlx5_core_dct *dct)
-{
- return _mlx5_core_destroy_dct(dev, dct, true);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
-
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
-{
- u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
- int err;
-
- mlx5_debug_qp_remove(dev, qp);
-
- destroy_resource_common(dev, qp);
-
- MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
- MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
- MLX5_SET(destroy_qp_in, in, uid, qp->uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- atomic_dec(&dev->num_qps);
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
- u32 timeout_usec)
-{
- u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
-
- MLX5_SET(set_delay_drop_params_in, in, opcode,
- MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
- MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
- timeout_usec / 100);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
-
-struct mbox_info {
- u32 *in;
- u32 *out;
- int inlen;
- int outlen;
-};
-
-static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
-{
- mbox->inlen = inlen;
- mbox->outlen = outlen;
- mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
- mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
- if (!mbox->in || !mbox->out) {
- kfree(mbox->in);
- kfree(mbox->out);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mbox_free(struct mbox_info *mbox)
-{
- kfree(mbox->in);
- kfree(mbox->out);
-}
-
-static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
- u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox, u16 uid)
-{
- mbox->out = NULL;
- mbox->in = NULL;
-
-#define MBOX_ALLOC(mbox, typ) \
- mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
-
-#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
- do { \
- MLX5_SET(typ##_in, in, opcode, _opcode); \
- MLX5_SET(typ##_in, in, qpn, _qpn); \
- MLX5_SET(typ##_in, in, uid, _uid); \
- } while (0)
-
-#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
- do { \
- MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
- MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
- memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
- MLX5_ST_SZ_BYTES(qpc)); \
- } while (0)
-
- switch (opcode) {
- /* 2RST & 2ERR */
- case MLX5_CMD_OP_2RST_QP:
- if (MBOX_ALLOC(mbox, qp_2rst))
- return -ENOMEM;
- MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
- break;
- case MLX5_CMD_OP_2ERR_QP:
- if (MBOX_ALLOC(mbox, qp_2err))
- return -ENOMEM;
- MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
- break;
-
- /* MODIFY with QPC */
- case MLX5_CMD_OP_RST2INIT_QP:
- if (MBOX_ALLOC(mbox, rst2init_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- case MLX5_CMD_OP_INIT2RTR_QP:
- if (MBOX_ALLOC(mbox, init2rtr_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- case MLX5_CMD_OP_RTR2RTS_QP:
- if (MBOX_ALLOC(mbox, rtr2rts_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- case MLX5_CMD_OP_RTS2RTS_QP:
- if (MBOX_ALLOC(mbox, rts2rts_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- case MLX5_CMD_OP_SQERR2RTS_QP:
- if (MBOX_ALLOC(mbox, sqerr2rts_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- case MLX5_CMD_OP_INIT2INIT_QP:
- if (MBOX_ALLOC(mbox, init2init_qp))
- return -ENOMEM;
- MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc, uid);
- break;
- default:
- mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
- opcode, qpn);
- return -EINVAL;
- }
- return 0;
-}
-
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp)
-{
- struct mbox_info mbox;
- int err;
-
- err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox, qp->uid);
- if (err)
- return err;
-
- err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
- mbox_free(&mbox);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
-{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
-
- memset(table, 0, sizeof(*table));
- spin_lock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- mlx5_qp_debugfs_init(dev);
-
- table->nb.notifier_call = rsc_event_notifier;
- mlx5_notifier_register(dev, &table->nb);
-}
-
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
-{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
-
- mlx5_notifier_unregister(dev, &table->nb);
- mlx5_qp_debugfs_cleanup(dev);
-}
-
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
-
- MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
- MLX5_SET(query_qp_in, in, qpn, qp->qpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
- struct mlx5_core_qp *qp = &dct->mqp;
-
- MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
- MLX5_SET(query_dct_in, in, dctn, qp->qpn);
-
- return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
- (void *)out, outlen);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
-{
- u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
- int err;
-
- MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
-{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
-
- MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
- MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-
-static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
-
- MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
- MLX5_SET(destroy_rq_in, in, rqn, rqn);
- MLX5_SET(destroy_rq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *rq)
-{
- int err;
- u32 rqn;
-
- err = mlx5_core_create_rq(dev, in, inlen, &rqn);
- if (err)
- return err;
-
- rq->uid = MLX5_GET(create_rq_in, in, uid);
- rq->qpn = rqn;
- err = create_resource_common(dev, rq, MLX5_RES_RQ);
- if (err)
- goto err_destroy_rq;
-
- return 0;
-
-err_destroy_rq:
- destroy_rq_tracked(dev, rq->qpn, rq->uid);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *rq)
-{
- destroy_resource_common(dev, rq);
- destroy_rq_tracked(dev, rq->qpn, rq->uid);
-}
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-
-static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
-
- MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
- MLX5_SET(destroy_sq_in, in, sqn, sqn);
- MLX5_SET(destroy_sq_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *sq)
-{
- int err;
- u32 sqn;
-
- err = mlx5_core_create_sq(dev, in, inlen, &sqn);
- if (err)
- return err;
-
- sq->uid = MLX5_GET(create_sq_in, in, uid);
- sq->qpn = sqn;
- err = create_resource_common(dev, sq, MLX5_RES_SQ);
- if (err)
- goto err_destroy_sq;
-
- return 0;
-
-err_destroy_sq:
- destroy_sq_tracked(dev, sq->qpn, sq->uid);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *sq)
-{
- destroy_resource_common(dev, sq);
- destroy_sq_tracked(dev, sq->qpn, sq->uid);
-}
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
- int err;
-
- MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *counter_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
-
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
-{
- u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
-
- MLX5_SET(dealloc_q_counter_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
-
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
-
- MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- MLX5_SET(query_q_counter_in, in, clear, reset);
- MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
- int res_num,
- enum mlx5_res_type res_type)
-{
- u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
- struct mlx5_qp_table *table = &dev->priv.qp_table;
-
- return mlx5_get_rsc(table, rsn);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
-
-void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
-{
- mlx5_core_put_rsc(res);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_res_put);
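Deleting qp.c removes the QP/DCT/RQ/SQ resource table and its command wrappers from mlx5_core; in the upstream series this machinery moves to the mlx5_ib driver, and in-core users such as dr_send.c below switch to issuing CREATE_QP/DESTROY_QP straight through the command interface and tracking a bare qpn. A minimal sketch of the direct form, assuming in/inlen hold a populated create_qp_in mailbox:

    static int create_qp_direct(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                u32 *qpn)
    {
            u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
            int err;

            MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
            err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
            if (err)
                    return err;

            *qpn = MLX5_GET(create_qp_out, out, qpn);
            return 0;
    }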
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index f3b29d9ade1f..99039c47ef33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -33,15 +33,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
{
- u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
void *schedc;
int err;
@@ -53,7 +52,7 @@ int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
if (err)
return err;
@@ -66,8 +65,7 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 element_id,
u32 modify_bitmask)
{
- u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
void *schedc;
schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
@@ -82,14 +80,13 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
hierarchy);
memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id)
{
- u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};
MLX5_SET(destroy_scheduling_element_in, in, opcode,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
@@ -98,7 +95,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
hierarchy);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}
static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
@@ -145,8 +142,7 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
void *pp_context;
pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
@@ -156,7 +152,7 @@ static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
if (set)
memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 554811de4c9d..df1363a34a42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1662,7 +1662,7 @@ dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
}
static bool
-dr_action_modify_check_is_ttl_modify(const u64 *sw_action)
+dr_action_modify_check_is_ttl_modify(const void *sw_action)
{
u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 461b39376daf..6bd34b293007 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -18,7 +18,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
if (err)
return err;
@@ -51,7 +51,7 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
HCA_CAP_OPMOD_GET_CUR);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
if (err) {
kfree(out);
return err;
@@ -141,7 +141,7 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(query_flow_table_in, in, table_type, type);
MLX5_SET(query_flow_table_in, in, table_id, table_id);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
if (err)
return err;
@@ -158,12 +158,11 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
- u32 out[MLX5_ST_SZ_DW(sync_steering_out)] = {};
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, sync_steering, in);
}
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
@@ -214,14 +213,13 @@ int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id)
{
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, table_type);
MLX5_SET(delete_fte_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
}
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
@@ -263,7 +261,6 @@ out:
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
u32 modify_header_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
@@ -271,7 +268,7 @@ int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_header_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
@@ -292,7 +289,7 @@ int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_group_in, in, table_type, table_type);
MLX5_SET(create_flow_group_in, in, table_id, table_id);
- err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
if (err)
goto out;
@@ -309,14 +306,14 @@ int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
u32 group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {};
- MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
@@ -360,7 +357,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
attr->reformat_en);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
if (err)
return err;
@@ -379,7 +376,6 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
u32 table_id,
u32 table_type)
{
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
MLX5_SET(destroy_flow_table_in, in, opcode,
@@ -387,7 +383,7 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
@@ -434,7 +430,6 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
@@ -442,7 +437,7 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
reformat_id);
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
@@ -458,7 +453,7 @@ int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
MLX5_SET(query_roce_address_in, in, roce_address_index, index);
MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
if (err)
return err;
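Besides the wrapper conversions, the destroy_flow_group hunk fixes a copy/paste bug: the DESTROY opcode was being written through the create_flow_group_in layout. It happened to work only because every command's *_in layout begins with the opcode in the same position, which a hypothetical helper makes explicit (mbox_in is the generic input header from mlx5_ifc.h):

    static u16 cmd_in_opcode(const u32 *in)
    {
            /* opcode occupies bits 16..31 of dword 0 in every *_in layout */
            return MLX5_GET(mbox_in, in, opcode);
    }

The corrected MLX5_SET(destroy_flow_group_in, ...) form is still preferable: it keeps the layout name honest for readers and for static checkers.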
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 48b6358b6845..890767a2a7cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -297,7 +297,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
dmn->mdev = mdev;
dmn->type = type;
refcount_set(&dmn->refcount, 1);
- mutex_init(&dmn->mutex);
+ mutex_init(&dmn->info.rx.mutex);
+ mutex_init(&dmn->info.tx.mutex);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -345,9 +346,9 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int ret = 0;
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
- mutex_lock(&dmn->mutex);
+ mlx5dr_domain_lock(dmn);
ret = mlx5dr_send_ring_force_drain(dmn);
- mutex_unlock(&dmn->mutex);
+ mlx5dr_domain_unlock(dmn);
if (ret) {
mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
flags, ret);
@@ -371,7 +372,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_cache(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
- mutex_destroy(&dmn->mutex);
+ mutex_destroy(&dmn->info.tx.mutex);
+ mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
return 0;
}
@@ -379,7 +381,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn)
{
- mutex_lock(&dmn->mutex);
+ mlx5dr_domain_lock(dmn);
if (dmn->peer_dmn)
refcount_dec(&dmn->peer_dmn->refcount);
@@ -389,5 +391,5 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
if (dmn->peer_dmn)
refcount_inc(&dmn->peer_dmn->refcount);
- mutex_unlock(&dmn->mutex);
+ mlx5dr_domain_unlock(dmn);
}
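The single domain mutex becomes one mutex per direction (RX and TX, added to mlx5dr_domain_rx_tx in the dr_types.h hunk below), so rule insertion on one side no longer serializes against the other. Full-domain operations take both in a fixed order to avoid deadlock; a usage sketch of the ordering contract:

    mlx5dr_domain_lock(dmn);     /* rx.mutex first, then tx.mutex */
    /* ... table/matcher reconfiguration touching both directions ... */
    mlx5dr_domain_unlock(dmn);   /* releases tx first, then rx */

Per-direction paths such as rule create/destroy take only mlx5dr_domain_nic_lock() on their own side, which is what makes the concurrency win possible.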
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 30d2d7376f56..cc33515b9aba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -95,13 +95,12 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
}
static struct mlx5dr_icm_mr *
-dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
- enum mlx5_sw_icm_type type,
- size_t align_base)
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5_core_dev *mdev = pool->dmn->mdev;
+ enum mlx5_sw_icm_type dm_type;
struct mlx5dr_icm_mr *icm_mr;
- size_t align_diff;
+ size_t log_align_base;
int err;
icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
@@ -111,14 +110,22 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->pool = pool;
INIT_LIST_HEAD(&icm_mr->mr_list);
- icm_mr->dm.type = type;
-
- /* 2^log_biggest_table * entry-size * double-for-alignment */
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) * 2;
+ pool->icm_type);
+
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ dm_type = MLX5_SW_ICM_TYPE_STEERING;
+ log_align_base = ilog2(icm_mr->dm.length);
+ } else {
+ dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ /* Align base is 64B */
+ log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+ }
+ icm_mr->dm.type = dm_type;
- err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
- &icm_mr->dm.addr, &icm_mr->dm.obj_id);
+ err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
+ log_align_base, 0, &icm_mr->dm.addr,
+ &icm_mr->dm.obj_id);
if (err) {
mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
goto free_icm_mr;
@@ -137,15 +144,18 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->icm_start_addr = icm_mr->dm.addr;
- /* align_base is always a power of 2 */
- align_diff = icm_mr->icm_start_addr & (align_base - 1);
- if (align_diff)
- icm_mr->used_length = align_base - align_diff;
+ if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
+ mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
+ log_align_base);
+ goto free_mkey;
+ }
list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
return icm_mr;
+free_mkey:
+ mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
free_dm:
mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
icm_mr->dm.addr, icm_mr->dm.obj_id);
@@ -200,24 +210,11 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
struct mlx5dr_icm_pool *pool = bucket->pool;
struct mlx5dr_icm_mr *icm_mr = NULL;
struct mlx5dr_icm_chunk *chunk;
- enum mlx5_sw_icm_type dm_type;
- size_t align_base;
int i, err = 0;
mr_req_size = bucket->num_of_entries * bucket->entry_size;
mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
-
- if (pool->icm_type == DR_ICM_TYPE_STE) {
- dm_type = MLX5_SW_ICM_TYPE_STEERING;
- /* Align base is the biggest chunk size / row size */
- align_base = mr_row_size;
- } else {
- dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
- /* Align base is 64B */
- align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
- }
-
mutex_lock(&pool->mr_mutex);
if (!list_empty(&pool->icm_mr_list)) {
icm_mr = list_last_entry(&pool->icm_mr_list,
@@ -228,7 +225,7 @@ static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
}
if (!icm_mr || mr_free_size < mr_row_size) {
- icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
+ icm_mr = dr_icm_pool_mr_create(pool);
if (!icm_mr) {
err = -ENOMEM;
goto out_err;
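dr_icm_pool_mr_create() used to double the allocation ("double-for-alignment") and carve an aligned window out of it; now it passes the required alignment down to mlx5_dm_sw_icm_alloc() via the new log_align_base argument and merely verifies the result. The check is the usual power-of-two trick; a sketch:

    /* With an alignment base of BIT(log_align_base) bytes, the low
     * log_align_base bits of the ICM start address must be zero.
     */
    static bool icm_addr_is_aligned(u64 addr, size_t log_align_base)
    {
            return !(addr & (BIT(log_align_base) - 1));
    }

STE pools ask for alignment equal to the pool's largest chunk size; header-modify pools keep the fixed 64B base.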
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index a95938874798..31abcbb95ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -690,7 +690,7 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
refcount_set(&matcher->refcount, 1);
INIT_LIST_HEAD(&matcher->matcher_list);
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
ret = dr_matcher_init(matcher, mask);
if (ret)
@@ -700,14 +700,14 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto matcher_uninit;
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return matcher;
matcher_uninit:
dr_matcher_uninit(matcher);
free_matcher:
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
dec_ref:
refcount_dec(&tbl->refcount);
@@ -791,13 +791,13 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
if (refcount_read(&matcher->refcount) > 1)
return -EBUSY;
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
dr_matcher_remove_from_tbl(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index cce3ee7a6614..cd708dcc2e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -938,7 +938,10 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
+ mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
+ mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
+
return 0;
}
@@ -1039,18 +1042,18 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
return 0;
+ hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
+ if (!hw_ste_arr)
+ return -ENOMEM;
+
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- goto out_err;
-
- hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr) {
- ret = -ENOMEM;
- goto out_err;
- }
+ goto free_hw_ste;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
@@ -1115,6 +1118,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
kfree(hw_ste_arr);
return 0;
@@ -1129,8 +1134,8 @@ free_rule:
kfree(ste_info);
}
free_hw_ste:
+ mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
-out_err:
return ret;
}
@@ -1232,31 +1237,23 @@ struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_rule *rule;
- mutex_lock(&matcher->tbl->dmn->mutex);
refcount_inc(&matcher->refcount);
rule = dr_rule_create_rule(matcher, value, num_actions, actions);
if (!rule)
refcount_dec(&matcher->refcount);
- mutex_unlock(&matcher->tbl->dmn->mutex);
-
return rule;
}
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
struct mlx5dr_matcher *matcher = rule->matcher;
- struct mlx5dr_table *tbl = rule->matcher->tbl;
int ret;
- mutex_lock(&tbl->dmn->mutex);
-
ret = dr_rule_destroy_rule(rule);
-
- mutex_unlock(&tbl->dmn->mutex);
-
if (!ret)
refcount_dec(&matcher->refcount);
+
return ret;
}
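dr_rule.c inherits the finer locking: rule create/destroy now take only the relevant nic domain lock, and the DR_RULE_MAX_STE_CHAIN scratch array is allocated with GFP_KERNEL before the lock is taken, so the possibly-sleeping allocation never happens under it. The shape of the pattern, with do_locked_work() standing in as a hypothetical placeholder:

    buf = kzalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;

    mlx5dr_domain_nic_lock(nic_dmn);
    ret = do_locked_work(buf);          /* hypothetical */
    mlx5dr_domain_nic_unlock(nic_dmn);

    kfree(buf);
    return ret;

Note how the error path in dr_rule_create_rule_nic() was reshuffled to match: free_hw_ste now unlocks before freeing, and the old out_err label disappears.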
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 18719acb7e54..f421013b0b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -100,14 +100,10 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
-static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
-{
- pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
-}
-
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
@@ -180,14 +176,12 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
in, pas));
- err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+ err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
kfree(in);
-
- if (err) {
- mlx5_core_warn(mdev, " Can't create QP\n");
+ if (err)
goto err_in;
- }
- dr_qp->mqp.event = dr_qp_event;
dr_qp->uar = attr->uar;
return dr_qp;
@@ -204,7 +198,12 @@ err_wq:
static void dr_destroy_qp(struct mlx5_core_dev *mdev,
struct mlx5dr_qp *dr_qp)
{
- mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
+ mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
kfree(dr_qp->sq.wqe_head);
mlx5_wq_destroy(&dr_qp->wq_ctrl);
kfree(dr_qp);
@@ -242,7 +241,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
MLX5_WQE_CTRL_CQ_UPDATE : 0;
wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
opcode);
- wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
+ wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
wq_raddr = (void *)(wq_ctrl + 1);
wq_raddr->raddr = cpu_to_be64(remote_addr);
wq_raddr->rkey = cpu_to_be32(rkey);
@@ -358,9 +357,11 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
u32 buff_offset;
int ret;
+ spin_lock(&send_ring->lock);
+
ret = dr_handle_pending_wc(dmn, send_ring);
if (ret)
- return ret;
+ goto out_unlock;
if (send_info->write.length > dmn->info.max_inline_size) {
buff_offset = (send_ring->tx_head &
@@ -378,7 +379,9 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
dr_fill_data_segs(send_ring, send_info);
dr_post_send(send_ring->qp, send_info);
- return 0;
+out_unlock:
+ spin_unlock(&send_ring->lock);
+ return ret;
}
static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
@@ -564,9 +567,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
send_info.remote_addr = action->rewrite.chunk->mr_addr;
send_info.rkey = action->rewrite.chunk->rkey;
- mutex_lock(&dmn->mutex);
ret = dr_postsend_icm_data(dmn, &send_info);
- mutex_unlock(&dmn->mutex);
return ret;
}
@@ -585,8 +586,10 @@ static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, rre, 1);
MLX5_SET(qpc, qpc, rwe, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
@@ -598,14 +601,15 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
- MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
- MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+ MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
@@ -617,7 +621,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
- MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
MLX5_SET(qpc, qpc, mtu, attr->mtu);
MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
@@ -636,8 +640,10 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
MLX5_SET(qpc, qpc, min_rnr_nak, 1);
- return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
- &dr_qp->mqp);
+ MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+ MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
+
+ return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
@@ -663,7 +669,7 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return ret;
rtr_attr.mtu = mtu;
- rtr_attr.qp_num = dr_qp->mqp.qpn;
+ rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port;
rtr_attr.sgid_index = gid_index;
@@ -689,12 +695,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_event(struct mlx5_core_cq *mcq,
- enum mlx5_event event)
-{
- pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
-}
-
static void dr_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
@@ -761,7 +761,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.event = dr_cq_event;
cq->mcq.comp = dr_cq_complete;
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
@@ -889,6 +888,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE;
+ spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
@@ -993,7 +993,9 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
return ret;
}
+ spin_lock(&send_ring->lock);
ret = dr_handle_pending_wc(dmn, send_ring);
+ spin_unlock(&send_ring->lock);
return ret;
}
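dr_send.c follows from the qp.c removal and the locking split: the QP is created, modified and destroyed directly through the command interface and identified by a bare qpn, while a new per-send-ring spinlock replaces the domain mutex around dr_postsend_icm_data(). The qpn also feeds the WQE control segment directly; a sketch of that packing:

    /* qpn_ds: the low byte is the WQE size in 16-byte data segments,
     * bits 8..31 carry the QP number.
     */
    static __be32 mk_qpn_ds(u32 qpn, u8 ds_count)
    {
            return cpu_to_be32(ds_count | (qpn << 8));
    }

The dr_qp_event()/dr_cq_event() stubs can be dropped along the way, since nothing registers for those events once the core QP table is gone.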
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index c0e3a1e7389d..00c2f598f034 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -112,7 +112,7 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
u32 crc = crc32(0, input_data, length);
- return htonl(crc);
+ return (__force u32)htonl(crc);
}
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
@@ -869,7 +869,7 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
- u32 raw_ip[4];
+ __be32 raw_ip[4];
spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
@@ -961,7 +961,6 @@ static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
- spec->metadata_reg_b = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_b);
}
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
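The dr_ste.c hunks are sparse endianness fixes rather than behavioral changes: the CRC is byte-swapped into the device's expected layout and then deliberately used as a native u32 hash index, hence the __force cast, and raw_ip[] becomes __be32 to match how the bytes are copied out. The cast site in one piece:

    static u32 ste_hash_word(const void *data, size_t len)
    {
            /* swap to the device byte order, then treat the result as a
             * plain index; __force silences sparse on the typed conversion
             */
            return (__force u32)htonl(crc32(0, data, len));
    }

Dropping the metadata_reg_b copy pairs with the dr_types.h change below, which retires the field by widening reserved_auto2 from 8 to 12 bytes so the overall struct layout is preserved.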
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index c2fe48d7b75a..b599b6beb5b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -14,7 +14,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
if (!list_empty(&tbl->matcher_list))
last_matcher = list_last_entry(&tbl->matcher_list,
@@ -78,7 +78,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
refcount_inc(&action->refcount);
out:
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
@@ -95,7 +95,7 @@ static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
static void dr_table_uninit(struct mlx5dr_table *tbl)
{
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -112,7 +112,7 @@ static void dr_table_uninit(struct mlx5dr_table *tbl)
break;
}
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
}
static int dr_table_init_nic(struct mlx5dr_domain *dmn,
@@ -177,7 +177,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
INIT_LIST_HEAD(&tbl->matcher_list);
- mutex_lock(&tbl->dmn->mutex);
+ mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -201,7 +201,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
break;
}
- mutex_unlock(&tbl->dmn->mutex);
+ mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3fa739951b34..0883956c58c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -554,8 +554,7 @@ struct mlx5dr_match_misc2 {
u32 metadata_reg_c_1; /* metadata_reg_c_1 */
u32 metadata_reg_c_0; /* metadata_reg_c_0 */
u32 metadata_reg_a; /* metadata_reg_a */
- u32 metadata_reg_b; /* metadata_reg_b */
- u8 reserved_auto2[8];
+ u8 reserved_auto2[12];
};
struct mlx5dr_match_misc3 {
@@ -636,6 +635,7 @@ struct mlx5dr_domain_rx_tx {
u64 drop_icm_addr;
u64 default_icm_addr;
enum mlx5dr_ste_entry_type ste_type;
+ struct mutex mutex; /* protect rx/tx domain */
};
struct mlx5dr_domain_info {
@@ -660,7 +660,6 @@ struct mlx5dr_domain {
struct mlx5_uars_page *uar;
enum mlx5dr_domain_type type;
refcount_t refcount;
- struct mutex mutex; /* protect domain */
struct mlx5dr_icm_pool *ste_icm_pool;
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
@@ -814,6 +813,28 @@ struct mlx5dr_icm_chunk {
struct list_head *miss_list;
};
+static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ mutex_lock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+ mutex_unlock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_domain_nic_lock(&dmn->info.rx);
+ mlx5dr_domain_nic_lock(&dmn->info.tx);
+}
+
+static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_domain_nic_unlock(&dmn->info.tx);
+ mlx5dr_domain_nic_unlock(&dmn->info.rx);
+}
+
static inline int
mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
@@ -990,7 +1011,7 @@ struct mlx5dr_qp {
struct mlx5_wq_qp wq;
struct mlx5_uars_page *uar;
struct mlx5_wq_ctrl wq_ctrl;
- struct mlx5_core_qp mqp;
+ u32 qpn;
struct {
unsigned int pc;
unsigned int cc;
@@ -1043,6 +1064,7 @@ struct mlx5dr_send_ring {
struct ib_wc wc[MAX_SEND_CQE];
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
+ spinlock_t lock; /* Protect the data path of the send ring */
};
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
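
The per-nic-domain mutexes replace the old single domain mutex, and the
helpers above encode the lock ordering that keeps this deadlock-free:
mlx5dr_domain_lock() always takes rx before tx, and unlock releases in
reverse. A hedged caller sketch:

	mlx5dr_domain_lock(tbl->dmn);		/* rx, then tx */
	/* ... mutate state shared by both nic domains ... */
	mlx5dr_domain_unlock(tbl->dmn);		/* tx, then rx */

Paths that only touch one direction can instead take just their half via
mlx5dr_domain_nic_lock()/mlx5dr_domain_nic_unlock().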
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3b3f5b9d4f95..8887b2440c7d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -576,7 +576,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action;
size_t actions_sz;
- actions_sz = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) *
+ actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
num_actions;
action = mlx5dr_action_create_modify_header(dr_domain, 0,
actions_sz,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index b1068500f1df..01cc00ad8acf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -36,14 +36,14 @@
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -54,19 +54,18 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
- u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
@@ -78,44 +77,39 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
}
EXPORT_SYMBOL(mlx5_core_create_rq);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
-
MLX5_SET(modify_rq_in, in, rqn, rqn);
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
- memset(out, 0, sizeof(out));
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_rq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rq, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {};
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_rq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
- u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
@@ -126,34 +120,30 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
return err;
}
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
-
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_sq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_sq, in);
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+ u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {};
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ return mlx5_cmd_exec_inout(dev, query_sq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
@@ -182,24 +172,13 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
-int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
- u32 *in, int inlen,
- u32 *out, int outlen)
-{
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
- return mlx5_cmd_exec(dev, in, inlen, out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_create_tir_out);
-
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn)
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
- err = mlx5_core_create_tir_out(dev, in, inlen,
- out, sizeof(out));
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@@ -207,35 +186,30 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tir);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen)
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
-
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tir, in);
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn)
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn)
{
- u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@@ -243,33 +217,29 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
}
EXPORT_SYMBOL(mlx5_core_create_tis);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen)
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in)
{
- u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0};
-
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_tis, in);
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
@@ -284,7 +254,7 @@ EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
@@ -293,12 +263,11 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
@@ -383,7 +352,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_sq)
{
- u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
void *rqc;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
@@ -396,8 +365,7 @@ static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
- return mlx5_core_modify_rq(func_mdev, rqn,
- in, MLX5_ST_SZ_BYTES(modify_rq_in));
+ return mlx5_core_modify_rq(func_mdev, rqn, in);
}
static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
@@ -417,8 +385,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
- return mlx5_core_modify_sq(peer_mdev, sqn,
- in, MLX5_ST_SZ_BYTES(modify_sq_in));
+ return mlx5_core_modify_sq(peer_mdev, sqn, in);
}
static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
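
All of the transobj.c conversions lean on two helpers whose shape is
roughly the following (a sketch, not a verbatim copy of the mlx5 headers):
mlx5_cmd_exec_inout() derives both mailbox sizes from the IFC command
name, and mlx5_cmd_exec_in() additionally supplies a stack scratch output
buffer:

	#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)		\
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in),	\
			      out, MLX5_ST_SZ_BYTES(ifc_cmd##_out))

	#define mlx5_cmd_exec_in(dev, ifc_cmd, in)			\
	({								\
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};		\
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);		\
	})

This is why callers above can drop their out[] arrays, inlen parameters
and memset() calls without changing behaviour.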
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 0d006224d7b0..da481a7c12f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -34,17 +34,16 @@
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
- u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
int err;
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
if (!err)
*uarn = MLX5_GET(alloc_uar_out, out, uar);
return err;
@@ -53,12 +52,11 @@ EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 23f879da9104..c107d92dc118 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -40,10 +40,11 @@
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
-static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u32 *out, int outlen)
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@@ -52,14 +53,9 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
- _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return 0;
return MLX5_GET(query_vport_state_out, out, state);
}
@@ -67,8 +63,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 other_vport, u8 state)
{
- u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
@@ -77,13 +72,13 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
MLX5_SET(modify_vport_state_in, in, admin_state, state);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out, int outlen)
+ u32 *out)
{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
@@ -91,26 +86,16 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
- int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
-
- MLX5_SET(modify_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline)
{
- u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -139,8 +124,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline)
{
- u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
- int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
void *nic_vport_ctx;
MLX5_SET(modify_nic_vport_context_in, in,
@@ -152,23 +136,20 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
in, nic_vport_context);
MLX5_SET(nic_vport_context, nic_vport_ctx,
min_wqe_inline_mode, min_inline);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- return mlx5_modify_nic_vport_context(mdev, in, inlen);
+ return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, bool other, u8 *addr)
{
- int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
- u32 *out;
int err;
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);
@@ -177,11 +158,10 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
if (!err)
ether_addr_copy(addr, &out_addr[2]);
- kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
@@ -216,8 +196,10 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
permanent_address);
ether_addr_copy(&perm_mac[2], addr);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -235,7 +217,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -257,8 +239,10 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
return err;
@@ -292,7 +276,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
- out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kzalloc(out_sz, GFP_KERNEL);
@@ -332,7 +316,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size)
{
- u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
void *nic_vport_ctx;
int max_list_size;
int in_sz;
@@ -350,7 +334,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- memset(out, 0, sizeof(out));
in = kzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -442,7 +425,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
@@ -462,7 +445,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
@@ -498,8 +481,10 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -516,7 +501,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ mlx5_query_nic_vport_context(mdev, 0, out);
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
@@ -664,7 +649,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
- int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
+ int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
int is_group_manager;
void *out;
void *ctx;
@@ -691,7 +676,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
if (err)
goto ex;
@@ -788,7 +773,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, vport, out);
if (err)
goto out;
@@ -825,8 +810,10 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
nic_vport_context.promisc_mc, promisc_mc);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.promisc_all, promisc_all);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -865,8 +852,10 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
if (!err)
mlx5_core_dbg(mdev, "%s local_lb\n",
@@ -888,7 +877,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
@@ -925,8 +914,10 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
state);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
kvfree(in);
@@ -965,16 +956,15 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
mutex_unlock(&mlx5_roce_en_lock);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz)
+ int vf, u8 port_num, void *out)
{
- int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
- int is_group_manager;
- void *in;
- int err;
+ int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+ int is_group_manager;
+ void *in;
+ int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
in = kvzalloc(in_sz, GFP_KERNEL);
@@ -997,7 +987,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
- err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
kvfree(in);
return err;
@@ -1008,8 +998,8 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
u8 other_vport, u64 *rx_discard_vport_down,
u64 *tx_discard_vport_down)
{
- u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
int err;
MLX5_SET(query_vnic_env_in, in, opcode,
@@ -1018,7 +1008,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
MLX5_SET(query_vnic_env_in, in, vport_number, vport);
MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
if (err)
return err;
@@ -1035,11 +1025,10 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req)
{
int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
- u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
int is_group_manager;
+ void *ctx;
void *in;
int err;
- void *ctx;
mlx5_core_dbg(dev, "vf %d\n", vf);
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
@@ -1047,7 +1036,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
- memset(out, 0, sizeof(out));
MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
if (other_vport) {
if (is_group_manager) {
@@ -1074,7 +1062,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
req->cap_mask1_perm);
- err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
kfree(in);
return err;
@@ -1103,8 +1091,10 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1129,8 +1119,10 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
nic_vport_context.affiliated_vhca_id, 0);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria, 0);
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
if (!err)
mlx5_nic_vport_disable_roce(port_mdev);
@@ -1170,4 +1162,4 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
-EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
+EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
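
One behavioural note on mlx5_query_vport_state(): with the
_mlx5_query_vport_state() helper folded in, a failed QUERY_VPORT_STATE
command now returns 0 explicitly instead of relying on the
zero-initialized mailbox, so callers still read an error as "down":

	u8 state = mlx5_query_vport_state(mdev, opmod, vport);
	/* state == 0 covers both link-down and a failed query */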
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 046a0cb82ed8..7a04c626a2aa 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -76,7 +76,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_STATE_ERR_MAX:
MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
break;
- };
+ }
return mlxfw_fsm_state_errno[fsm_state_err];
};
@@ -159,7 +159,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
case MLXFW_FSM_REACTIVATE_STATUS_MAX:
MLXFW_REACT_ERR("unexpected error", err);
break;
- };
+ }
return -EREMOTEIO;
};
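
The two mlxfw hunks drop stray semicolons after switch blocks; a
brace-closed switch is already a complete C statement, so the extra ';'
is an empty statement that newer compilers flag. For example:

	switch (err) {
	case 0:
		break;
	}	/* no ';' needed here; the brace ends the statement */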
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0e86a581d45b..4aeabb35c943 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -21,6 +21,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
spectrum_acl_bloom_filter.o spectrum_acl.o \
+ spectrum_flow.o spectrum_matchall.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 9b39b8e70519..fcb88d4271bf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3203,7 +3203,7 @@ MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
* Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
* Access: OP
*/
-MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 13,
MLXSW_REG_IEDR_REC_LEN, 0x00, false);
/* reg_iedr_rec_index_start
@@ -5526,40 +5526,37 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
- MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
-
- __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
- MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
-};
-
-enum mlxsw_reg_htgt_discard_trap_group {
- MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
};
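
The folded-in groups keep the sentinel idiom intact: new entries are
added ahead of __MLXSW_REG_HTGT_TRAP_GROUP_MAX so the MAX value tracks
them automatically. The pattern, for reference:

	enum example_group {
		EXAMPLE_GROUP_A,
		EXAMPLE_GROUP_B,	/* new entries go here */
		__EXAMPLE_GROUP_MAX,
		EXAMPLE_GROUP_MAX = __EXAMPLE_GROUP_MAX - 1
	};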
/* reg_htgt_trap_group
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6b39978acd07..5ffa32b75e5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -25,9 +25,7 @@
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
-#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
-#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>
#include "spectrum.h"
@@ -582,16 +580,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable, u32 rate)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char mpsc_pl[MLXSW_REG_MPSC_LEN];
-
- mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
-}
-
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -1362,412 +1350,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static struct mlxsw_sp_port_mall_tc_entry *
-mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
- unsigned long cookie) {
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
- if (mall_tc_entry->cookie == cookie)
- return mall_tc_entry;
-
- return NULL;
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
- const struct flow_action_entry *act,
- bool ingress)
-{
- enum mlxsw_sp_span_type span_type;
-
- if (!act->dev) {
- netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
- return -EINVAL;
- }
-
- mirror->ingress = ingress;
- span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
- true, &mirror->span_id);
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
-{
- enum mlxsw_sp_span_type span_type;
-
- span_type = mirror->ingress ?
- MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
- span_type, true);
-}
-
-static int
-mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *cls,
- const struct flow_action_entry *act,
- bool ingress)
-{
- int err;
-
- if (!mlxsw_sp_port->sample)
- return -EOPNOTSUPP;
- if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
- netdev_err(mlxsw_sp_port->dev, "sample already active\n");
- return -EEXIST;
- }
- if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
- return -EOPNOTSUPP;
- }
-
- rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
- act->sample.psample_group);
- mlxsw_sp_port->sample->truncate = act->sample.truncate;
- mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
- mlxsw_sp_port->sample->rate = act->sample.rate;
-
- err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
- if (err)
- goto err_port_sample_set;
- return 0;
-
-err_port_sample_set:
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
- return err;
-}
-
-static void
-mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- if (!mlxsw_sp_port->sample)
- return;
-
- mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
- RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
-}
-
-static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
- return -EOPNOTSUPP;
- }
-
- mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
- if (!mall_tc_entry)
- return -ENOMEM;
- mall_tc_entry->cookie = f->cookie;
-
- act = &f->rule->action.entries[0];
-
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
-
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
- mirror = &mall_tc_entry->mirror;
- err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
- mirror, act,
- ingress);
- } else if (act->id == FLOW_ACTION_SAMPLE &&
- protocol == htons(ETH_P_ALL)) {
- mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
- err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
- act, ingress);
- } else {
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_add_action;
-
- list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
- return 0;
-
-err_add_action:
- kfree(mall_tc_entry);
- return err;
-}
-
-static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f)
-{
- struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-
- mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
- f->cookie);
- if (!mall_tc_entry) {
- netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
- return;
- }
- list_del(&mall_tc_entry->list);
-
- switch (mall_tc_entry->type) {
- case MLXSW_SP_PORT_MALL_MIRROR:
- mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
- &mall_tc_entry->mirror);
- break;
- case MLXSW_SP_PORT_MALL_SAMPLE:
- mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
- break;
- default:
- WARN_ON(1);
- }
-
- kfree(mall_tc_entry);
-}
-
-static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
-{
- switch (f->command) {
- case TC_CLSMATCHALL_REPLACE:
- return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
- ingress);
- case TC_CLSMATCHALL_DESTROY:
- mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
- struct flow_cls_offload *f)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
-
- switch (f->command) {
- case FLOW_CLS_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
- case FLOW_CLS_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
- return 0;
- case FLOW_CLS_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_CREATE:
- return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
- case FLOW_CLS_TMPLT_DESTROY:
- mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
- void *type_data,
- void *cb_priv, bool ingress)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
- type_data))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
- ingress);
- case TC_SETUP_CLSFLOWER:
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, true);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, false);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- return 0;
- case TC_SETUP_CLSFLOWER:
- if (mlxsw_sp_acl_block_disabled(acl_block))
- return -EOPNOTSUPP;
-
- return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
-{
- struct mlxsw_sp_acl_block *acl_block = cb_priv;
-
- mlxsw_sp_acl_block_destroy(acl_block);
-}
-
-static LIST_HEAD(mlxsw_sp_block_cb_list);
-
-static int
-mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- bool register_block = false;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb) {
- acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, f->net);
- if (!acl_block)
- return -ENOMEM;
- block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block,
- mlxsw_sp_tc_block_flower_release);
- if (IS_ERR(block_cb)) {
- mlxsw_sp_acl_block_destroy(acl_block);
- err = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- register_block = true;
- } else {
- acl_block = flow_block_cb_priv(block_cb);
- }
- flow_block_cb_incref(block_cb);
- err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress, f->extack);
- if (err)
- goto err_block_bind;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = acl_block;
- else
- mlxsw_sp_port->eg_acl_block = acl_block;
-
- if (register_block) {
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- }
-
- return 0;
-
-err_block_bind:
- if (!flow_block_cb_decref(block_cb))
- flow_block_cb_free(block_cb);
-err_cb_register:
- return err;
-}
-
-static void
-mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
- struct flow_block_cb *block_cb;
- int err;
-
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp);
- if (!block_cb)
- return;
-
- if (ingress)
- mlxsw_sp_port->ing_acl_block = NULL;
- else
- mlxsw_sp_port->eg_acl_block = NULL;
-
- acl_block = flow_block_cb_priv(block_cb);
- err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
- if (!err && !flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
-
-static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
- flow_setup_cb_t *cb;
- bool ingress;
- int err;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
- ingress = true;
- } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
- ingress = false;
- } else {
- return -EOPNOTSUPP;
- }
-
- f->driver_block_list = &mlxsw_sp_block_cb_list;
-
- switch (f->command) {
- case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
- &mlxsw_sp_block_cb_list))
- return -EBUSY;
-
- block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
- mlxsw_sp_port, NULL);
- if (IS_ERR(block_cb))
- return PTR_ERR(block_cb);
- err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
- ingress);
- if (err) {
- flow_block_cb_free(block_cb);
- return err;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- return 0;
- case FLOW_BLOCK_UNBIND:
- mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
- f, ingress);
- block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1791,23 +1373,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
if (!enable) {
- if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
- mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
- !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
+ mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
} else {
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
- mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
+ mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
}
return 0;
}
@@ -3695,7 +3275,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
- INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -3704,13 +3283,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
- mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
- GFP_KERNEL);
- if (!mlxsw_sp_port->sample) {
- err = -ENOMEM;
- goto err_alloc_sample;
- }
-
INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache);
@@ -3897,8 +3469,6 @@ err_dev_addr_init:
err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
- kfree(mlxsw_sp_port->sample);
-err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
@@ -3926,7 +3496,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
- kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
@@ -4418,12 +3987,17 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
-static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
- void *priv)
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port)
+{
+ mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
+}
+
+void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct psample_group *psample_group;
+ struct mlxsw_sp_port_sample *sample;
u32 size;
if (unlikely(!mlxsw_sp_port)) {
@@ -4431,36 +4005,20 @@ static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
local_port);
goto out;
}
- if (unlikely(!mlxsw_sp_port->sample)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
- local_port);
- goto out;
- }
-
- size = mlxsw_sp_port->sample->truncate ?
- mlxsw_sp_port->sample->trunc_size : skb->len;
rcu_read_lock();
- psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
- if (!psample_group)
+ sample = rcu_dereference(mlxsw_sp_port->sample);
+ if (!sample)
goto out_unlock;
- psample_sample_packet(psample_group, skb, size,
- mlxsw_sp_port->dev->ifindex, 0,
- mlxsw_sp_port->sample->rate);
+ size = sample->truncate ? sample->trunc_size : skb->len;
+ psample_sample_packet(sample->psample_group, skb, size,
+ mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
rcu_read_unlock();
out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_listener_ptp(struct sk_buff *skb, u8 local_port,
- void *priv)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
-}
-
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -4480,58 +4038,13 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Events */
MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
/* L2 traps */
- MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
- MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
- MLXSW_RXL(mlxsw_sp_rx_listener_ptp, LLDP, TRAP_TO_CPU,
- false, SP_LLDP, DISCARD),
- MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
- MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
- MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
- MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
- MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
- MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
- false),
- MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
- false),
+ MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
/* L3 traps */
- MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
- MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
- false),
- MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
- MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
- MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
- MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
- MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
- MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
- false),
- MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
false),
- MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false),
- MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
@@ -4540,23 +4053,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
- /* PKT Sample trap */
- MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
- false, SP_IP2ME, DISCARD),
- /* ACL trap */
- MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
/* Multicast Router Traps */
- MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
- MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
- MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
- /* PTP traps */
- MLXSW_RXL(mlxsw_sp_rx_listener_ptp, PTP0, TRAP_TO_CPU,
- false, SP_PTP0, DISCARD),
- MLXSW_SP_RXL_NO_MARK(PTP1, TRAP_TO_CPU, PTP1, false),
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -4585,46 +4086,12 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
for (i = 0; i < max_cpu_policers; i++) {
is_bytes = false;
switch (i) {
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
- rate = 128;
- burst_size = 7;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
- rate = 16 * 1024;
- burst_size = 10;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
rate = 1024;
burst_size = 7;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- rate = 1024;
- burst_size = 7;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
- rate = 24 * 1024;
- burst_size = 12;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
- rate = 19 * 1024;
- burst_size = 12;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
- rate = 360;
- burst_size = 7;
- break;
default:
continue;
}
@@ -4659,43 +4126,12 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
for (i = 0; i < max_trap_groups; i++) {
policer_id = i;
switch (i) {
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP:
- priority = 5;
- tc = 5;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
- priority = 4;
- tc = 4;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
- priority = 3;
- tc = 3;
- break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
- priority = 2;
- tc = 2;
- break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
priority = 1;
tc = 1;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
- priority = 0;
- tc = 1;
- break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
tc = MLXSW_REG_HTGT_DEFAULT_TC;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ca56e72cb4b7..6f96ca50c9ba 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -109,25 +109,6 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_port_mall_action_type {
- MLXSW_SP_PORT_MALL_MIRROR,
- MLXSW_SP_PORT_MALL_SAMPLE,
-};
-
-struct mlxsw_sp_port_mall_mirror_tc_entry {
- int span_id;
- bool ingress;
-};
-
-struct mlxsw_sp_port_mall_tc_entry {
- struct list_head list;
- unsigned long cookie;
- enum mlxsw_sp_port_mall_action_type type;
- union {
- struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
- };
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -211,7 +192,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port_sample {
- struct psample_group __rcu *psample_group;
+ struct psample_group *psample_group;
u32 trunc_size;
u32 rate;
bool truncate;
@@ -274,21 +255,19 @@ struct mlxsw_sp_port {
* the same localport can have
* different mapping.
*/
- /* TC handles */
- struct list_head mall_tc_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
struct rtnl_link_stats64 stats;
struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
} periodic_hw_stats;
- struct mlxsw_sp_port_sample *sample;
+ struct mlxsw_sp_port_sample __rcu *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
- struct mlxsw_sp_acl_block *ing_acl_block;
- struct mlxsw_sp_acl_block *eg_acl_block;
+ struct mlxsw_sp_flow_block *ing_flow_block;
+ struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
struct hwtstamp_config hwtstamp_config;
@@ -472,6 +451,10 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u8 local_port, void *priv);
+void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
+void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -654,17 +637,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-struct mlxsw_sp_acl_block;
-struct mlxsw_sp_acl_ruleset;
-
-/* spectrum_acl.c */
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
- MLXSW_SP_ACL_PROFILE_MR,
-};
-
-struct mlxsw_sp_acl_block {
+/* spectrum_flow.c */
+struct mlxsw_sp_flow_block {
struct list_head binding_list;
+ struct {
+ struct list_head list;
+ unsigned int min_prio;
+ unsigned int max_prio;
+ } mall;
struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
@@ -676,40 +656,100 @@ struct mlxsw_sp_acl_block {
struct net *net;
};
+struct mlxsw_sp_flow_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ bool ingress;
+};
+
+static inline struct mlxsw_sp *
+mlxsw_sp_flow_block_mlxsw_sp(struct mlxsw_sp_flow_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+static inline unsigned int
+mlxsw_sp_flow_block_rule_count(const struct mlxsw_sp_flow_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_inc(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+static inline void
+mlxsw_sp_flow_block_disable_dec(struct mlxsw_sp_flow_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+static inline bool
+mlxsw_sp_flow_block_disabled(const struct mlxsw_sp_flow_block *block)
+{
+ return block->disable_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_egress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->egress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_ingress_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+static inline bool
+mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
+}
+
+struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
+
+/* spectrum_acl.c */
+struct mlxsw_sp_acl_ruleset;
+
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block);
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net);
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack);
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
+
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
@@ -736,7 +776,7 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -857,22 +897,39 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+/* spectrum_matchall.c */
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
+
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f);
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio);
/* spectrum_qdisc.c */
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index e31ec75ac035..a11d911302f1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -9,7 +9,7 @@
struct mlxsw_sp2_mr_tcam {
struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_block *acl_block;
+ struct mlxsw_sp_flow_block *flow_block;
struct mlxsw_sp_acl_ruleset *ruleset4;
struct mlxsw_sp_acl_ruleset *ruleset6;
};
@@ -61,7 +61,7 @@ static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv4,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -111,7 +111,7 @@ static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
mlxsw_sp2_mr_tcam_usage_ipv6,
ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
- mr_tcam->acl_block,
+ mr_tcam->flow_block,
MLXSW_SP_L3_PROTO_IPV6,
MLXSW_SP_ACL_PROFILE_MR,
&elusage);
@@ -289,8 +289,8 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
int err;
mr_tcam->mlxsw_sp = mlxsw_sp;
- mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL);
- if (!mr_tcam->acl_block)
+ mr_tcam->flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->flow_block)
return -ENOMEM;
err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
@@ -306,7 +306,7 @@ static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err_ipv6_init:
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
err_ipv4_init:
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
return err;
}
@@ -316,7 +316,7 @@ static void mlxsw_sp2_mr_tcam_fini(void *priv)
mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
- mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ mlxsw_sp_flow_block_destroy(mr_tcam->flow_block);
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 01cff711bbd2..47da9ee0045d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,15 +40,8 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_block_binding {
- struct list_head list;
- struct net_device *dev;
- struct mlxsw_sp_port *mlxsw_sp_port;
- bool ingress;
-};
-
struct mlxsw_sp_acl_ruleset_ht_key {
- struct mlxsw_sp_acl_block *block;
+ struct mlxsw_sp_flow_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -58,6 +51,8 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
+ unsigned int min_prio;
+ unsigned int max_prio;
unsigned long priv[];
/* priv has to be always the last item */
};
@@ -94,49 +89,6 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
-struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
-{
- return block->mlxsw_sp;
-}
-
-unsigned int
-mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
-{
- return block ? block->rule_count : 0;
-}
-
-void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count++;
-}
-
-void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
-{
- if (block)
- block->disable_count--;
-}
-
-bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
-{
- return block->disable_count;
-}
-
-bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->egress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count;
-}
-
-bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ingress_binding_count && block->egress_binding_count;
-}
-
static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
@@ -144,10 +96,9 @@ mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
return ruleset->ref_count == 2;
}
-static int
-mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -156,10 +107,9 @@ mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static void
-mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_acl_block_binding *binding)
+void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_flow_block_binding *binding)
{
struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -168,18 +118,12 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static bool
-mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
-{
- return block->ruleset_zero;
-}
-
static int
mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
int err;
block->ruleset_zero = ruleset;
@@ -202,122 +146,18 @@ rollback:
static void
mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
list_for_each_entry(binding, &block->binding_list, list)
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
block->ruleset_zero = NULL;
}
-struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
- struct net *net)
-{
- struct mlxsw_sp_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->mlxsw_sp = mlxsw_sp;
- block->net = net;
- return block;
-}
-
-void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
-{
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
-}
-
-static struct mlxsw_sp_acl_block_binding *
-mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->mlxsw_sp_port == mlxsw_sp_port &&
- binding->ingress == ingress)
- return binding;
- return NULL;
-}
-
-int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_acl_block_binding *binding;
- int err;
-
- if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
- return -EEXIST;
-
- if (ingress && block->ingress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- if (!ingress && block->egress_blocker_rule_count) {
- NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
- return -EOPNOTSUPP;
- }
-
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->mlxsw_sp_port = mlxsw_sp_port;
- binding->ingress = ingress;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block)) {
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
- if (err)
- goto err_ruleset_bind;
- }
-
- if (ingress)
- block->ingress_binding_count++;
- else
- block->egress_binding_count++;
- list_add(&binding->list, &block->binding_list);
- return 0;
-
-err_ruleset_bind:
- kfree(binding);
- return err;
-}
-
-int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
-{
- struct mlxsw_sp_acl_block_binding *binding;
-
- binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
- if (!binding)
- return -ENOENT;
-
- list_del(&binding->list);
-
- if (ingress)
- block->ingress_binding_count--;
- else
- block->egress_binding_count--;
-
- if (mlxsw_sp_acl_ruleset_block_bound(block))
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
-
- kfree(binding);
- return 0;
-}
-
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -340,7 +180,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_init;
err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv,
- tmplt_elusage);
+ tmplt_elusage, &ruleset->min_prio,
+ &ruleset->max_prio);
if (err)
goto err_ops_ruleset_add;
@@ -388,7 +229,7 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
@@ -403,7 +244,7 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -421,7 +262,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block, u32 chain_index,
+ struct mlxsw_sp_flow_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile,
struct mlxsw_afk_element_usage *tmplt_elusage)
{
@@ -455,6 +296,14 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
+void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ *p_min_prio = ruleset->min_prio;
+ *p_max_prio = ruleset->max_prio;
+}
+
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block)
@@ -584,11 +433,11 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list)) {
@@ -596,7 +445,7 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
binding = list_first_entry(&block->binding_list,
- struct mlxsw_sp_acl_block_binding, list);
+ struct mlxsw_sp_flow_block_binding, list);
in_port = binding->mlxsw_sp_port;
return mlxsw_afa_block_append_mirror(rulei->act_block,
@@ -818,7 +667,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -862,18 +711,17 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ struct mlxsw_sp_flow_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
- ruleset->ht_key.block->rule_count--;
+ block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
if (!ruleset->ht_key.chain_index &&
mlxsw_sp_acl_ruleset_is_singular(ruleset))
- mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index e47d1d286e93..73d56012654b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -136,28 +136,35 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- enum mlxsw_sp_span_type type;
+ int err;
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
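+	/* Resolve a SPAN agent towards the target device and mark the
+	 * local port as analyzed; each step is rolled back on failure.
+	 */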
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id);
+ if (err)
+ return err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
+ if (err)
+ goto err_analyzed_port_get;
- return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
- false, p_span_id);
+ return 0;
+
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, *p_span_id);
+ return err;
}
static void
mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
+ struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *in_port;
- enum mlxsw_sp_span_type type;
-
- type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
+ mlxsw_sp_port = mlxsw_sp->ports[local_in_port];
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index a6e30e020b5c..5c020403342f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -179,6 +179,8 @@ struct mlxsw_sp_acl_tcam_vgroup {
bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
+ unsigned int *p_min_prio;
+ unsigned int *p_max_prio;
};
struct mlxsw_sp_acl_tcam_rehash_ctx {
@@ -316,13 +318,17 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage,
- bool vregion_rehash_enabled)
+ bool vregion_rehash_enabled,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
int err;
vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+ vgroup->p_min_prio = p_min_prio;
+ vgroup->p_max_prio = p_max_prio;
if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true;
@@ -416,6 +422,21 @@ mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
return vchunk->priority;
}
+static void
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
+{
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+ if (list_empty(&vgroup->vregion_list))
+ return;
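+	/* The vregion list is kept sorted by priority, so the first and
+	 * last entries hold the group's minimum and maximum priorities.
+	 */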
+ vregion = list_first_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+ vregion = list_last_entry(&vgroup->vregion_list,
+ typeof(*vregion), list);
+ *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
+}
+
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
@@ -1035,6 +1056,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
}
list_add_tail(&vchunk->list, pos);
mutex_unlock(&vregion->lock);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
return vchunk;
@@ -1066,6 +1088,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vchunk_ht_params);
mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
kfree(vchunk);
+ mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}
static struct mlxsw_sp_acl_tcam_vchunk *
@@ -1582,14 +1605,17 @@ static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, true);
+ tmplt_elusage, true,
+ p_min_prio, p_max_prio);
}
static void
@@ -1698,7 +1724,9 @@ static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage)
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
{
struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
int err;
@@ -1706,7 +1734,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
- tmplt_elusage, false);
+ tmplt_elusage, false,
+ p_min_prio, p_max_prio);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 96437992b102..a41df10ade9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -42,7 +42,8 @@ struct mlxsw_sp_acl_profile_ops {
size_t ruleset_priv_size;
int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv,
- struct mlxsw_afk_element_usage *tmplt_elusage);
+ struct mlxsw_afk_element_usage *tmplt_elusage,
+ unsigned int *p_min_prio, unsigned int *p_max_prio);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 968f0902e4fe..21bfb2f6a6f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -614,7 +614,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
- MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
new file mode 100644
index 000000000000..47b66f347ff1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <net/net_namespace.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_flow_block *
+mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
+{
+ struct mlxsw_sp_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->mall.list);
+ block->mlxsw_sp = mlxsw_sp;
+ block->net = net;
+ return block;
+}
+
+void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_flow_block_binding *
+mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+static bool
+mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
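+	/* Replay matchall rules already installed on the block onto the
+	 * newly bound port.
+	 */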
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ if (err)
+ return err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_binding_alloc;
+ }
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+err_binding_alloc:
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return err;
+}
+
+static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+
+ binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
+ if (mlxsw_sp_flow_block_ruleset_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
+ return 0;
+}
+
+static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_mall_destroy(flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
+ struct flow_cls_offload *f)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_DESTROY:
+ mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ if (mlxsw_sp_flow_block_disabled(flow_block))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlxsw_sp_tc_block_release(void *cb_priv)
+{
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
+
+ mlxsw_sp_flow_block_destroy(flow_block);
+}
+
+static LIST_HEAD(mlxsw_sp_block_cb_list);
+
+static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb) {
+ flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
+ if (!flow_block)
+ return -ENOMEM;
+ block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
+ mlxsw_sp, flow_block,
+ mlxsw_sp_tc_block_release);
+ if (IS_ERR(block_cb)) {
+ mlxsw_sp_flow_block_destroy(flow_block);
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ register_block = true;
+ } else {
+ flow_block = flow_block_cb_priv(block_cb);
+ }
+ flow_block_cb_incref(block_cb);
+ err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress, f->extack);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = flow_block;
+ else
+ mlxsw_sp_port->eg_flow_block = flow_block;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+err_cb_register:
+ return err;
+}
+
+static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_flow_block *flow_block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_flow_block = NULL;
+ else
+ mlxsw_sp_port->eg_flow_block = NULL;
+
+ flow_block = flow_block_cb_priv(block_cb);
+ err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
+ mlxsw_sp_port, ingress);
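+	/* Drop this binding's reference; remove and free the block
+	 * callback once the last binding is gone.
+	 */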
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ ingress = true;
+ else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ ingress = false;
+ else
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &mlxsw_sp_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
+ case FLOW_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 890b078851c9..51e1b3930c56 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,7 +15,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -30,14 +30,14 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
act = flow_action_first_entry_get(flow_action);
- if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
- act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
/* Count action is inserted first */
err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
if (err)
return err;
- } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
- act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
+ } else {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
return -EOPNOTSUPP;
}
@@ -54,11 +54,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
case FLOW_ACTION_DROP: {
bool ingress;
- if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
return -EOPNOTSUPP;
}
- ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
+ ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
act->cookie, extack);
if (err) {
@@ -107,7 +107,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
return -EOPNOTSUPP;
}
@@ -191,7 +191,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f,
- struct mlxsw_sp_acl_block *block)
+ struct mlxsw_sp_flow_block *block)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -372,7 +372,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f)
{
@@ -461,7 +461,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
@@ -505,8 +505,36 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->common.extack);
}
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+ unsigned int mall_min_prio;
+ unsigned int mall_max_prio;
+ int err;
+
+ err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+ &mall_min_prio, &mall_max_prio);
+ if (err) {
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
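+	/* Ingress matchall actions take effect before flower (ACL) rules
+	 * and egress matchall actions after them, so reject priorities
+	 * that would contradict this ordering.
+	 */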
+ if (ingress && f->common.prio <= mall_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (!ingress && f->common.prio >= mall_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_rule_info *rulei;
@@ -514,6 +542,10 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule;
int err;
+ err = mlxsw_sp_flower_mall_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
@@ -553,7 +585,7 @@ err_rule_create:
}
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -575,7 +607,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
@@ -612,7 +644,7 @@ err_rule_get_stats:
}
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -633,7 +665,7 @@ int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
}
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_flow_block *block,
struct flow_cls_offload *f)
{
struct mlxsw_sp_acl_ruleset *ruleset;
@@ -647,3 +679,23 @@ void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ u32 chain_index, unsigned int *p_min_prio,
+ unsigned int *p_max_prio)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset;
+
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+ chain_index,
+ MLXSW_SP_ACL_PROFILE_FLOWER);
+ if (IS_ERR(ruleset))
+ /* In case there are no flower rules, the caller
+ * receives -ENOENT to indicate there is no need
+ * to check the priorities.
+ */
+ return PTR_ERR(ruleset);
+ mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
new file mode 100644
index 000000000000..f1a44a8eda55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_offload.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "reg.h"
+
+enum mlxsw_sp_mall_action_type {
+ MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
+ MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
+};
+
+struct mlxsw_sp_mall_mirror_entry {
+ const struct net_device *to_dev;
+ int span_id;
+};
+
+struct mlxsw_sp_mall_entry {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned int priority;
+ enum mlxsw_sp_mall_action_type type;
+ bool ingress;
+ union {
+ struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_port_sample sample;
+ };
+ struct rcu_head rcu;
+};
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ if (mall_entry->cookie == cookie)
+ return mall_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+ int err;
+
+ if (!mall_entry->mirror.to_dev) {
+ netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
+ &mall_entry->mirror.span_id);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
+ mall_entry->ingress);
+ if (err)
+ goto err_analyzed_port_get;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
+ &parms);
+ if (err)
+ goto err_agent_bind;
+
+ return 0;
+
+err_agent_bind:
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+err_analyzed_port_get:
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_trigger_parms parms;
+ enum mlxsw_sp_span_trigger trigger;
+
+ trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
+ MLXSW_SP_SPAN_TRIGGER_EGRESS;
+ parms.span_id = mall_entry->mirror.span_id;
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
+ mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
+static int
+mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ int err;
+
+ if (rtnl_dereference(mlxsw_sp_port->sample)) {
+ netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+ return -EEXIST;
+ }
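+	/* Publish the sample parameters before enabling sampling in
+	 * hardware, so the Rx listener always finds them.
+	 */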
+ rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
+
+ err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
+ mall_entry->sample.rate);
+ if (err)
+ goto err_port_sample_set;
+ return 0;
+
+err_port_sample_set:
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!mlxsw_sp_port->sample)
+ return;
+
+ mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
+ RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
+}
+
+static int
+mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static void
+mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mall_entry *mall_entry)
+{
+ switch (mall_entry->type) {
+ case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+ mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
+ break;
+ case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
+ mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ if (list_empty(&block->mall.list))
+ return;
+ block->mall.min_prio = UINT_MAX;
+ block->mall.max_prio = 0;
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ if (mall_entry->priority < block->mall.min_prio)
+ block->mall.min_prio = mall_entry->priority;
+ if (mall_entry->priority > block->mall.max_prio)
+ block->mall.max_prio = mall_entry->priority;
+ }
+}
+
+int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ unsigned int flower_min_prio;
+ unsigned int flower_max_prio;
+ bool flower_prio_valid;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
+ &flower_min_prio, &flower_max_prio);
+ if (err) {
+ if (err != -ENOENT) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+ flower_prio_valid = false;
+ /* No flower filters are installed in specified chain. */
+ } else {
+ flower_prio_valid = true;
+ }
+
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+ mall_entry->cookie = f->cookie;
+ mall_entry->priority = f->common.prio;
+ mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+
+ act = &f->rule->action.entries[0];
+
+ if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
+ if (flower_prio_valid && mall_entry->ingress &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid && !mall_entry->ingress &&
+ mall_entry->priority <= flower_max_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+ mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_SAMPLE &&
+ protocol == htons(ETH_P_ALL)) {
+ if (!mall_entry->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (flower_prio_valid &&
+ mall_entry->priority >= flower_min_prio) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
+ NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
+ mall_entry->sample.psample_group = act->sample.psample_group;
+ mall_entry->sample.truncate = act->sample.truncate;
+ mall_entry->sample.trunc_size = act->sample.trunc_size;
+ mall_entry->sample.rate = act->sample.rate;
+ } else {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
+ mall_entry);
+ if (err)
+ goto rollback;
+ }
+
+ block->rule_count++;
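+	/* A matchall rule bound to ingress blocks binding the block to
+	 * egress and vice versa, so account for it in the opposite
+	 * blocker counter.
+	 */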
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count++;
+ else
+ block->ingress_blocker_rule_count++;
+ list_add_tail(&mall_entry->list, &block->mall.list);
+ mlxsw_sp_mall_prio_update(block);
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+errout:
+ kfree(mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct mlxsw_sp_flow_block_binding *binding;
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
+ if (!mall_entry) {
+ NL_SET_ERR_MSG(f->common.extack, "Entry not found");
+ return;
+ }
+
+ list_del(&mall_entry->list);
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count--;
+ else
+ block->ingress_blocker_rule_count--;
+ block->rule_count--;
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
+ kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
+ mlxsw_sp_mall_prio_update(block);
+}
+
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list) {
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
+ list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall.list, list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+}
+
+int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
+ unsigned int *p_min_prio, unsigned int *p_max_prio)
+{
+ if (chain_index || list_empty(&block->mall.list))
+		/* For a non-default chain, or when there are no matchall
+		 * rules, the caller receives -ENOENT to indicate there is
+		 * no need to check the priorities.
+		 */
+ return -ENOENT;
+ *p_min_prio = block->mall.min_prio;
+ *p_max_prio = block->mall.max_prio;
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d5bca1be3ef5..770de0222e7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2999,6 +2999,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
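+			/* Hash the gateway address as well; nexthops may
+			 * share an ifindex while pointing at different
+			 * gateways.
+			 */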
+ val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -3012,11 +3013,14 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
unsigned int val = fib6_entry->nrt6;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+ struct net_device *dev = fib6_nh->fib_nh_dev;
+ struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
+
val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
+ val ^= jhash(gw, sizeof(*gw), seed);
}
return jhash(&val, sizeof(val), seed);
@@ -4999,9 +5003,11 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
/* Packets with link-local destination IP arriving to the router
* are trapped to the CPU, so no need to program specific routes
- * for them.
+ * for them. Only allow prefix routes (usually one fe80::/64) so
+ * that packets are trapped for the right reason.
*/
- if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
+ if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
+ (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
return true;
/* Multicast routes aren't supported, so ignore them. Neighbour
@@ -7568,7 +7574,7 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- struct net_device *br_dev = rif->dev;
+ struct net_device *br_dev;
u16 vid;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 9fb2e9d93929..304eb8c3d8bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/arp.h>
@@ -19,9 +21,27 @@
struct mlxsw_sp_span {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
+ struct list_head analyzed_ports_list;
+ struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
+ struct list_head trigger_entries_list;
atomic_t active_entries_count;
int entries_count;
- struct mlxsw_sp_span_entry entries[0];
+ struct mlxsw_sp_span_entry entries[];
+};
+
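+/* A port is analyzed when at least one mirroring agent inspects its
+ * traffic in the given direction. Analyzed ports are reference counted
+ * per (port, direction).
+ */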
+struct mlxsw_sp_span_analyzed_port {
+ struct list_head list; /* Member of analyzed_ports_list */
+ refcount_t ref_count;
+ u8 local_port;
+ bool ingress;
+};
+
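+/* Binds a mirroring trigger (ingress / egress) on a port to a SPAN
+ * agent, identified by parms.span_id.
+ */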
+struct mlxsw_sp_span_trigger_entry {
+ struct list_head list; /* Member of trigger_entries_list */
+ refcount_t ref_count;
+ u8 local_port;
+ enum mlxsw_sp_span_trigger trigger;
+ struct mlxsw_sp_span_trigger_parms parms;
};
static void mlxsw_sp_span_respin_work(struct work_struct *work);
@@ -48,15 +68,14 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
span->entries_count = entries_count;
atomic_set(&span->active_entries_count, 0);
+ mutex_init(&span->analyzed_ports_lock);
+ INIT_LIST_HEAD(&span->analyzed_ports_list);
+ INIT_LIST_HEAD(&span->trigger_entries_list);
span->mlxsw_sp = mlxsw_sp;
mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- INIT_LIST_HEAD(&curr->bound_ports_list);
- curr->id = i;
- }
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++)
+ mlxsw_sp->span->entries[i].id = i;
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
@@ -68,16 +87,13 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
+ mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
kfree(mlxsw_sp->span);
}
@@ -130,7 +146,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
.can_handle = mlxsw_sp_port_dev_check,
- .parms = mlxsw_sp_span_entry_phys_parms,
+ .parms_set = mlxsw_sp_span_entry_phys_parms,
.configure = mlxsw_sp_span_entry_phys_configure,
.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
@@ -418,7 +434,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
.can_handle = netif_is_gretap,
- .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap4_parms,
.configure = mlxsw_sp_span_entry_gretap4_configure,
.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
@@ -519,7 +535,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
.can_handle = netif_is_ip6gretap,
- .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .parms_set = mlxsw_sp_span_entry_gretap6_parms,
.configure = mlxsw_sp_span_entry_gretap6_configure,
.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
@@ -575,7 +591,7 @@ mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
.can_handle = mlxsw_sp_span_vlan_can_handle,
- .parms = mlxsw_sp_span_entry_vlan_parms,
+ .parms_set = mlxsw_sp_span_entry_vlan_parms,
.configure = mlxsw_sp_span_entry_vlan_configure,
.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
@@ -612,7 +628,7 @@ mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
}
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
- .parms = mlxsw_sp_span_entry_nop_parms,
+ .parms_set = mlxsw_sp_span_entry_nop_parms,
.configure = mlxsw_sp_span_entry_nop_configure,
.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
@@ -622,18 +638,27 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms)
{
- if (sparms.dest_port) {
- if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
- netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- } else if (span_entry->ops->configure(span_entry, sparms)) {
- netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
- sparms.dest_port->dev->name);
- sparms.dest_port = NULL;
- }
+ int err;
+
+ if (!sparms.dest_port)
+ goto set_parms;
+
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+		netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance\n",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
+ }
+
+ err = span_entry->ops->configure(span_entry, sparms);
+ if (err) {
+		netdev_err(span_entry->to_dev, "Failed to offload mirror to %s\n",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ goto set_parms;
}
+set_parms:
span_entry->parms = sparms;
}
@@ -655,7 +680,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
/* find a free entry to use */
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- if (!mlxsw_sp->span->entries[i].ref_count) {
+ if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
span_entry = &mlxsw_sp->span->entries[i];
break;
}
@@ -665,7 +690,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
- span_entry->ref_count = 1;
+ refcount_set(&span_entry->ref_count, 1);
span_entry->to_dev = to_dev;
mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
@@ -688,7 +713,7 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->to_dev == to_dev)
+ if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
return curr;
}
return NULL;
@@ -709,7 +734,7 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- if (curr->ref_count && curr->id == span_id)
+ if (refcount_read(&curr->ref_count) && curr->id == span_id)
return curr;
}
return NULL;
@@ -726,7 +751,7 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
if (span_entry) {
/* Already exists, just take a reference */
- span_entry->ref_count++;
+ refcount_inc(&span_entry->ref_count);
return span_entry;
}
@@ -736,32 +761,13 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry)
{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
+ if (refcount_dec_and_test(&span_entry->ref_count))
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
static int
-mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sbib_pl[MLXSW_REG_SBIB_LEN];
@@ -780,20 +786,54 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+
+ list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
+ if (analyzed_port->local_port == local_port &&
+ analyzed_port->ingress == ingress)
+ return analyzed_port;
+ }
+
+ return NULL;
+}
+
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ int err = 0;
+
/* If port is egress mirrored, the shared buffer size should be
* updated according to the mtu value
*/
- if (mlxsw_sp_span_is_egress_mirror(port))
- return mlxsw_sp_span_port_buffsize_update(port, mtu);
- return 0;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
+ false))
+ err = mlxsw_sp_span_port_buffer_update(port, mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ return err;
}
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
span.speed_update_dw);
@@ -801,237 +841,368 @@ void mlxsw_sp_span_speed_update_work(struct work_struct *work)
/* If port is egress mirrored, the shared buffer size should be
* updated according to the speed value.
*/
- if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
- mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
+
+ if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ mlxsw_sp_port->local_port, false))
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
+ mlxsw_sp_port->dev->mtu);
+
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- struct mlxsw_sp_port *port,
- bool bind)
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
{
- struct mlxsw_sp_span_inspected_port *p;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (type == p->type &&
- port->local_port == p->local_port &&
- bind == p->bound)
- return p;
return NULL;
}
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
+
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+ struct mlxsw_sp_span_parms sparms = {NULL};
+
+ if (!refcount_read(&curr->ref_count))
+ continue;
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+ err = curr->ops->parms_set(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
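+		/* Reconfigure the entry only if the parameters resolved
+		 * from the destination netdev changed since the last
+		 * (de)configuration.
+		 */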
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+ rtnl_unlock();
}
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int i;
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+}
+
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id)
+{
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_span_parms sparms;
int err;
- /* A given (source port, direction) can only be bound to one analyzer,
- * so if a binding is requested, check for conflicts.
- */
- if (bind)
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span->entries[i];
-
- if (mlxsw_sp_span_entry_bound_port_find(curr, type,
- port, bind))
- return -EEXIST;
- }
+ ASSERT_RTNL();
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
- if (err)
- return err;
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
+ return -EOPNOTSUPP;
}
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
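+	/* Resolve the mirroring parameters (egress port, encapsulation
+	 * headers) from the destination netdev before getting an agent.
+	 */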
+ memset(&sparms, 0, sizeof(sparms));
+ err = ops->parms_set(to_dev, &sparms);
+ if (err)
+ return err;
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- inspected_port->bound = bind;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOBUFS;
+
+ *p_span_id = span_entry->id;
return 0;
+}
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ ASSERT_RTNL();
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
+ if (WARN_ON_ONCE(!span_entry))
+ return;
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+}
+
+static struct mlxsw_sp_span_analyzed_port *
+mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ int err;
+
+ analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
+ if (!analyzed_port)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&analyzed_port->ref_count, 1);
+ analyzed_port->local_port = mlxsw_sp_port->local_port;
+ analyzed_port->ingress = ingress;
+ list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
+
+ /* An egress mirror buffer should be allocated on the egress port which
+ * does the mirroring.
+ */
+ if (!ingress) {
+ u16 mtu = mlxsw_sp_port->dev->mtu;
+
+ err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_buffer_update;
}
- return err;
+
+ return analyzed_port;
+
+err_buffer_update:
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+ return ERR_PTR(err);
}
static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_analyzed_port *
+ analyzed_port)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
+ struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
- inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
- port, bind);
- if (!inspected_port)
- return;
+ /* Remove egress mirror buffer now that port is no longer analyzed
+ * at egress.
+ */
+ if (!analyzed_port->ingress)
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
+ analyzed_port->local_port);
+
+ list_del(&analyzed_port->list);
+ kfree(analyzed_port);
+}
+
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (analyzed_port) {
+ refcount_inc(&analyzed_port->ref_count);
+ goto out_unlock;
}
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
+ mlxsw_sp_port,
+ ingress);
+ if (IS_ERR(analyzed_port))
+ err = PTR_ERR(analyzed_port);
- list_del(&inspected_port->list);
- kfree(inspected_port);
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
+ return err;
}
-static const struct mlxsw_sp_span_entry_ops *
-mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *to_dev)
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- size_t i;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_span_analyzed_port *analyzed_port;
+ u8 local_port = mlxsw_sp_port->local_port;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
- if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
- return mlxsw_sp_span_entry_types[i];
+ mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
- return NULL;
+ analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
+ local_port, ingress);
+ if (WARN_ON_ONCE(!analyzed_port))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&analyzed_port->ref_count))
+ goto out_unlock;
+
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type, bool bind,
- int *p_span_id)
+static int
+__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry, bool enable)
{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- const struct mlxsw_sp_span_entry_ops *ops;
- struct mlxsw_sp_span_parms sparms = {NULL};
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
- if (!ops) {
- netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
- return -EOPNOTSUPP;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ enum mlxsw_reg_mpar_i_e i_e;
+
+ switch (trigger_entry->trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
+ break;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
}
- err = ops->parms(to_dev, &sparms);
- if (err)
- return err;
+ mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
+ trigger_entry->parms.span_id);
+ return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
- span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
- if (!span_entry)
- return -ENOBUFS;
+static int
+mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
+}
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
+static void
+mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
+{
+ __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
+}
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms
+ *parms)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err;
+
+ trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
+ if (!trigger_entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&trigger_entry->ref_count, 1);
+ trigger_entry->local_port = mlxsw_sp_port->local_port;
+ trigger_entry->trigger = trigger;
+ memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
+ list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
+
+ err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
if (err)
- goto err_port_bind;
+ goto err_trigger_entry_bind;
- *p_span_id = span_entry->id;
- return 0;
+ return trigger_entry;
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
+err_trigger_entry_bind:
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+ return ERR_PTR(err);
}
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind)
+static void
+mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
+ struct mlxsw_sp_span_trigger_entry *
+ trigger_entry)
{
- struct mlxsw_sp_span_entry *span_entry;
+ mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
+ list_del(&trigger_entry->list);
+ kfree(trigger_entry);
+}
- span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
+static struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
+ if (trigger_entry->trigger == trigger &&
+ trigger_entry->local_port == mlxsw_sp_port->local_port)
+ return trigger_entry;
}
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+ return NULL;
}
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- struct mlxsw_sp_span *span;
- struct mlxsw_sp *mlxsw_sp;
- int i, err;
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+ int err = 0;
- span = container_of(work, struct mlxsw_sp_span, work);
- mlxsw_sp = span->mlxsw_sp;
+ ASSERT_RTNL();
- rtnl_lock();
- for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
- struct mlxsw_sp_span_parms sparms = {NULL};
+ if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
+ return -EINVAL;
- if (!curr->ref_count)
- continue;
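+	/* A (port, trigger) pair can be bound to at most one SPAN agent,
+	 * so an existing entry is either reused or rejected.
+	 */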
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (trigger_entry) {
+ if (trigger_entry->parms.span_id != parms->span_id)
+ return -EINVAL;
+ refcount_inc(&trigger_entry->ref_count);
+ goto out;
+ }
- err = curr->ops->parms(curr->to_dev, &sparms);
- if (err)
- continue;
+ trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port,
+ parms);
+ if (IS_ERR(trigger_entry))
+ err = PTR_ERR(trigger_entry);
- if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
- mlxsw_sp_span_entry_deconfigure(curr);
- mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
- }
- }
- rtnl_unlock();
+out:
+ return err;
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms)
{
- if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+ ASSERT_RTNL();
+
+ if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
+ parms->span_id)))
return;
- mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+
+ trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+ trigger,
+ mlxsw_sp_port);
+ if (WARN_ON_ONCE(!trigger_entry))
+ return;
+
+ if (!refcount_dec_and_test(&trigger_entry->ref_count))
+ return;
+
+ mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 59724335525f..9f6dd2d0f4e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -6,26 +6,13 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/refcount.h>
#include "spectrum_router.h"
struct mlxsw_sp;
struct mlxsw_sp_port;
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-
- /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
- bool bound;
-};
-
struct mlxsw_sp_span_parms {
struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
unsigned int ttl;
@@ -36,21 +23,29 @@ struct mlxsw_sp_span_parms {
u16 vid;
};
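+/* A trigger determines which packets on the bound port are mirrored:
+ * those received by the port or those transmitted by it.
+ */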
+enum mlxsw_sp_span_trigger {
+ MLXSW_SP_SPAN_TRIGGER_INGRESS,
+ MLXSW_SP_SPAN_TRIGGER_EGRESS,
+};
+
+struct mlxsw_sp_span_trigger_parms {
+ int span_id;
+};
+
struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_entry {
const struct net_device *to_dev;
const struct mlxsw_sp_span_entry_ops *ops;
struct mlxsw_sp_span_parms parms;
- struct list_head bound_ports_list;
- int ref_count;
+ refcount_t ref_count;
int id;
};
struct mlxsw_sp_span_entry_ops {
bool (*can_handle)(const struct net_device *to_dev);
- int (*parms)(const struct net_device *to_dev,
- struct mlxsw_sp_span_parms *sparmsp);
+ int (*parms_set)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
int (*configure)(struct mlxsw_sp_span_entry *span_entry,
struct mlxsw_sp_span_parms sparms);
void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
@@ -60,12 +55,6 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- const struct net_device *to_dev,
- enum mlxsw_sp_span_type type,
- bool bind, int *p_span_id);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
- enum mlxsw_sp_span_type type, bool bind);
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev);
@@ -76,4 +65,21 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
void mlxsw_sp_span_speed_update_work(struct work_struct *work);
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev, int *p_span_id);
+void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
+int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+void
+mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_span_trigger trigger,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_span_trigger_parms *parms);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index fbf714d027d8..157a42c63066 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -12,6 +12,24 @@
#include "spectrum.h"
#include "spectrum_trap.h"
+struct mlxsw_sp_trap_policer_item {
+ struct devlink_trap_policer policer;
+ u16 hw_id;
+};
+
+struct mlxsw_sp_trap_group_item {
+ struct devlink_trap_group group;
+ u16 hw_group_id;
+ u8 priority;
+};
+
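+/* The largest number of listeners a single trap below maps to; both
+ * UNRESOLVED_NEIGH and DECAP_ERROR register three listeners.
+ */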
+#define MLXSW_SP_TRAP_LISTENERS_MAX 3
+
+struct mlxsw_sp_trap_item {
+ struct devlink_trap trap;
+ struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX];
+};
+
/* All driver-specific traps must be documented in
* Documentation/networking/devlink/mlxsw.rst
*/
@@ -107,8 +125,8 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
{
struct devlink_port *in_devlink_port;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -121,7 +139,7 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
if (err)
- return;
+ return err;
devlink = priv_to_devlink(mlxsw_sp->core);
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
@@ -129,10 +147,71 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
skb_push(skb, ETH_HLEN);
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
skb_pull(skb, ETH_HLEN);
- skb->offload_fwd_mark = 1;
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
netif_receive_skb(skb);
}
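+/* offload_fwd_mark tells the bridge that the packet was already
+ * forwarded in hardware, so it must not be forwarded again in software.
+ */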
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
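+/* offload_l3_fwd_mark additionally indicates that the packet was
+ * already forwarded in hardware at layer 3.
+ */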
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ skb->offload_l3_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+}
+
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ /* The PTP handler expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_ptp_receive(mlxsw_sp, skb, local_port);
+}
+
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ int err;
+
+ err = __mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
+ if (err)
+ return;
+
+ /* The sample handler expects skb->data to point to the start of the
+ * Ethernet header.
+ */
+ skb_push(skb, ETH_HLEN);
+ mlxsw_sp_sample_receive(mlxsw_sp, skb, local_port);
+}
+
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -154,6 +233,11 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, DISCARD_##_id, \
TRAP_EXCEPTION_TO_CPU, false, SP_##_group_id, \
@@ -165,9 +249,21 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
SET_FW_DEFAULT, SP_##_dis_group_id)
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
- MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \
_action, false, SP_##_group_id, SET_FW_DEFAULT)
+#define MLXSW_SP_RXL_NO_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_no_mark_listener, _id, _action, \
+ _is_ctrl, SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
+#define MLXSW_SP_RXL_L3_MARK(_id, _group_id, _action, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_l3_mark_listener, _id, _action, _is_ctrl, \
+ SP_##_group_id, DISCARD)
+
#define MLXSW_SP_TRAP_POLICER(_id, _rate, _burst) \
DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
MLXSW_REG_QPCR_HIGHEST_CIR, \
@@ -176,149 +272,753 @@ static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
1 << MLXSW_REG_QPCR_LOWEST_CBS)
/* Ordered by policer identifier */
-static const struct devlink_trap_policer mlxsw_sp_trap_policers_arr[] = {
- MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+static const struct mlxsw_sp_trap_policer_item
+mlxsw_sp_trap_policer_items_arr[] = {
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(3, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 1024),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(11, 360, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 128),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 4096),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 4096),
+ },
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
+ },
};
-static const struct devlink_trap_group mlxsw_sp_trap_groups_arr[] = {
- DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
- DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(STP, 2),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LACP, 3),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LLDP, 4),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 5),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_MC_SNOOPING,
+ .priority = 3,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(DHCP, 6),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 7),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BFD, 8),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BFD,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(OSPF, 9),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BGP, 10),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
+ .priority = 4,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(VRRP, 11),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PIM, 12),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(UC_LB, 13),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 14),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_EVENT, 16),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
+ .priority = 5,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(PTP_GENERAL, 17),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+ .priority = 2,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_PKT_SAMPLE,
+ .priority = 0,
+ },
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 18),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_FLOW_LOGGING,
+ .priority = 4,
+ },
};
-static const struct devlink_trap mlxsw_sp_traps_arr[] = {
- MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
- MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
- MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
- MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
- MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
- MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
- MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
- MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
- MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS),
- MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS),
- MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
- MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
- MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
- MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
- MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
- MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
- DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
- MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
- DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW,
+ L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(RPF, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RPF, L3_EXCEPTIONS, TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS,
+ TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS,
+ L3_EXCEPTIONS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6,
+ L3_EXCEPTIONS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(IRIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DRIVER_DROP(ERIF_DISABLED, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR,
+ TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP,
+ ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ .listeners_arr = {
+ MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS,
+ DUMMY),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(STP, STP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(STP, STP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LACP, LACP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(LACP, LACP, TRAP_TO_CPU, true),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
+ false, SP_LLDP, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IGMP_QUERY, MC_SNOOPING,
+ MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, MC_SNOOPING,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_QUERY, MC_SNOOPING, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY,
+ MC_SNOOPING, MIRROR_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V2_REPORT, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(MLD_V1_DONE, MC_SNOOPING,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE,
+ MC_SNOOPING, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DHCP, DHCP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_DHCP, DHCP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(ARP_OVERLAY, NEIGH_DISCOVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_SOLICIT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_NEIGH_ADVERT,
+ NEIGH_DISCOVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISEMENT,
+ NEIGH_DISCOVERY, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BFD, BFD, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BFD, BFD, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_OSPF, OSPF, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_OSPF, OSPF, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_BGP, BGP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_BGP, BGP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_VRRP, VRRP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, VRRP, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV4_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_PIM, PIM, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_PIM, PIM, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(UC_LB, UC_LB, MIRROR),
+ .listeners_arr = {
+ MLXSW_SP_RXL_L3_MARK(LBERROR, LBERROR, MIRROR_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IP2ME, IP2ME, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_UC_DIP_LINK_LOCAL_SCOPE,
+ LOCAL_DELIVERY, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, IP2ME,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV4_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+		/* IPV6_ROUTER_ALERT is defined in uAPI as 22, but it is not
+		 * used in this file. Undefine it so that it is not expanded
+		 * to 22 inside the trap macros below, which would break the
+		 * token pasting of the trap name.
+		 */
+ #undef IPV6_ROUTER_ALERT
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ALERT, LOCAL_DELIVERY,
+ TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, IP2ME, TRAP_TO_CPU,
+ false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_NODES, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_DIP_ALL_ROUTERS, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_SOLICIT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_ROUTER_ADVERT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISEMENT, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(IPV6_REDIRECT, IPV6, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, IPV6,
+ TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_EVENT, PTP_EVENT, TRAP),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_ptp_listener, PTP0, TRAP_TO_CPU,
+ false, SP_PTP0, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(PTP_GENERAL, PTP_GENERAL, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(PTP1, PTP1, TRAP_TO_CPU, false),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_SAMPLE, ACL_SAMPLE,
+ MIRROR),
+ .listeners_arr = {
+ MLXSW_RXL(mlxsw_sp_rx_sample_listener, PKT_SAMPLE,
+ MIRROR_TO_CPU, false, SP_PKT_SAMPLE, DISCARD),
+ },
+ },
+ {
+ .trap = MLXSW_SP_TRAP_CONTROL(FLOW_ACTION_TRAP, ACL_TRAP, TRAP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_NO_MARK(ACL0, FLOW_LOGGING, TRAP_TO_CPU,
+ false),
+ },
+ },
};
-static const struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
- MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RPF, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_DISCARDS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, L3_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
- MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
- TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
- MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS, DUMMY),
- MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS, DUMMY),
-};
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
-/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
- * be mapped to the same devlink trap. Order is according to
- * 'mlxsw_sp_listeners_arr'.
- */
-static const u16 mlxsw_sp_listener_devlink_map[] = {
- DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
- DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
- DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
- DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
- DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
- DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
- DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
- DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
- DEVLINK_TRAP_GENERIC_ID_DIP_LB,
- DEVLINK_TRAP_GENERIC_ID_SIP_MC,
- DEVLINK_TRAP_GENERIC_ID_SIP_LB,
- DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
- DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
- DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
- DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
- DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
- DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
- DEVLINK_TRAP_GENERIC_ID_RPF,
- DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
- DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
- DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
- DEVLINK_MLXSW_TRAP_ID_IRIF_DISABLED,
- DEVLINK_MLXSW_TRAP_ID_ERIF_DISABLED,
- DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
- DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
- DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
- DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
-};
+ for (i = 0; i < trap->policers_count; i++) {
+ if (trap->policer_items_arr[i].policer.id == id)
+ return &trap->policer_items_arr[i];
+ }
-#define MLXSW_SP_THIN_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+ return NULL;
+}
-static struct mlxsw_sp_trap_policer_item *
-mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+static struct mlxsw_sp_trap_group_item *
+mlxsw_sp_trap_group_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
+{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = 0; i < trap->groups_count; i++) {
+ if (trap->group_items_arr[i].group.id == id)
+ return &trap->group_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_trap_item *
+mlxsw_sp_trap_item_lookup(struct mlxsw_sp *mlxsw_sp, u16 id)
{
- struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
- list_for_each_entry(policer_item, &trap->policer_item_list, list) {
- if (policer_item->id == id)
- return policer_item;
+ for (i = 0; i < trap->traps_count; i++) {
+ if (trap->trap_items_arr[i].trap.id == id)
+ return &trap->trap_items_arr[i];
}
return NULL;
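
The three lookup helpers above share one shape: a linear scan of a small fixed array keyed by id, returning NULL on a miss. A standalone sketch of the pattern, with a hypothetical item type and ids rather than the driver's structures:

#include <stddef.h>
#include <stdio.h>

struct item {
	unsigned int id;
	const char *name;
};

/* Linear scan by id; the arrays are small, so O(n) is acceptable. */
static struct item *item_lookup(struct item *arr, size_t count,
				unsigned int id)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (arr[i].id == id)
			return &arr[i];
	}
	return NULL;
}

int main(void)
{
	struct item arr[] = { { 1, "policer" }, { 7, "group" } };
	struct item *it = item_lookup(arr, 2, 7);

	printf("%s\n", it ? it->name : "miss");
	return 0;
}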
@@ -326,14 +1026,21 @@ mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 hw_id;
/* The purpose of "thin" policer is to drop as many packets
* as possible. The dummy group is using it.
*/
- __set_bit(MLXSW_SP_THIN_POLICER_ID, mlxsw_sp->trap->policers_usage);
- mlxsw_reg_qpcr_pack(qpcr_pl, MLXSW_SP_THIN_POLICER_ID,
- MLXSW_REG_QPCR_IR_UNITS_M, false, 1, 4);
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return -ENOBUFS;
+
+ __set_bit(hw_id, trap->policers_usage);
+ trap->thin_policer_hw_id = hw_id;
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M,
+ false, 1, 4);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
}
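
Instead of a fixed ID one past the trap-group range, the thin policer now claims whatever hardware ID the usage bitmap offers. A userspace sketch of that allocation step, with stand-ins for find_first_zero_bit()/__set_bit() and made-up sizes:

#include <stdio.h>

#define MAX_POLICERS 32u

/* Minimal stand-in for the kernel's find_first_zero_bit(). */
static unsigned int find_first_zero(unsigned long map, unsigned int max)
{
	unsigned int i;

	for (i = 0; i < max; i++)
		if (!(map & (1UL << i)))
			return i;
	return max; /* bitmap full */
}

int main(void)
{
	unsigned long usage = 0x7; /* ids 0..2 already in use */
	unsigned int hw_id = find_first_zero(usage, MAX_POLICERS);

	if (hw_id == MAX_POLICERS)
		return 1; /* the driver returns -ENOBUFS here */
	usage |= 1UL << hw_id; /* __set_bit() equivalent */
	printf("thin policer hw id: %u\n", hw_id); /* prints 3 */
	return 0;
}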
@@ -342,82 +1049,214 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
char htgt_pl[MLXSW_REG_HTGT_LEN];
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
- MLXSW_SP_THIN_POLICER_ID, 0, 1);
+ mlxsw_sp->trap->thin_policer_hw_id, 0, 1);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
}
-static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item);
+ u64 arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
u64 free_policers = 0;
- u32 last_id = 0;
- int err, i;
+ u32 last_id;
+ int i;
for_each_clear_bit(i, trap->policers_usage, trap->max_policers)
free_policers++;
- if (ARRAY_SIZE(mlxsw_sp_trap_policers_arr) > free_policers) {
+ if (arr_size > free_policers) {
dev_err(mlxsw_sp->bus_info->dev, "Exceeded number of supported packet trap policers\n");
return -ENOBUFS;
}
- trap->policers_arr = kcalloc(free_policers,
- sizeof(struct devlink_trap_policer),
- GFP_KERNEL);
- if (!trap->policers_arr)
+ trap->policer_items_arr = kcalloc(free_policers, elem_size, GFP_KERNEL);
+ if (!trap->policer_items_arr)
return -ENOMEM;
trap->policers_count = free_policers;
- for (i = 0; i < free_policers; i++) {
- const struct devlink_trap_policer *policer;
-
- if (i < ARRAY_SIZE(mlxsw_sp_trap_policers_arr)) {
- policer = &mlxsw_sp_trap_policers_arr[i];
- trap->policers_arr[i] = *policer;
- last_id = policer->id;
- } else {
- /* Use parameters set for first policer and override
- * relevant ones.
- */
- policer = &mlxsw_sp_trap_policers_arr[0];
- trap->policers_arr[i] = *policer;
- trap->policers_arr[i].id = ++last_id;
- trap->policers_arr[i].init_rate = 1;
- trap->policers_arr[i].init_burst = 16;
- }
+ /* Initialize policer items array with pre-defined policers. */
+ memcpy(trap->policer_items_arr, mlxsw_sp_trap_policer_items_arr,
+ elem_size * arr_size);
+
+ /* Initialize policer items array with the rest of the available
+ * policers.
+ */
+ last_id = mlxsw_sp_trap_policer_items_arr[arr_size - 1].policer.id;
+ for (i = arr_size; i < trap->policers_count; i++) {
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+
+ /* Use parameters set for first policer and override
+ * relevant ones.
+ */
+ policer_item = &mlxsw_sp_trap_policer_items_arr[0];
+ trap->policer_items_arr[i] = *policer_item;
+ trap->policer_items_arr[i].policer.id = ++last_id;
+ trap->policer_items_arr[i].policer.init_rate = 1;
+ trap->policer_items_arr[i].policer.init_burst = 16;
}
- INIT_LIST_HEAD(&trap->policer_item_list);
+ return 0;
+}
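
The array build is two passes: memcpy() the pre-defined policer items, then fill the remaining free slots with copies of item 0, advancing the ID and resetting rate/burst. A compact sketch under hypothetical values (the real item type embeds a struct devlink_trap_policer):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct policer {
	unsigned int id;
	unsigned long long rate, burst;
};

static const struct policer predefined[] = {
	{ .id = 1, .rate = 10240, .burst = 128 },
	{ .id = 2, .rate = 128, .burst = 128 },
};

static struct policer *build_policers(size_t total)
{
	size_t npre = sizeof(predefined) / sizeof(predefined[0]);
	struct policer *arr;
	unsigned int last_id;
	size_t i;

	if (npre > total) /* more pre-defined than free slots */
		return NULL;
	arr = calloc(total, sizeof(*arr));
	if (!arr)
		return NULL;
	memcpy(arr, predefined, sizeof(predefined));
	last_id = predefined[npre - 1].id;
	for (i = npre; i < total; i++) {
		arr[i] = predefined[0]; /* clone, then override */
		arr[i].id = ++last_id;
		arr[i].rate = 1;
		arr[i].burst = 16;
	}
	return arr;
}

int main(void)
{
	struct policer *arr = build_policers(4);

	if (arr)
		printf("last id %u\n", arr[3].id); /* prints 4 */
	free(arr);
	return 0;
}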
+
+static void mlxsw_sp_trap_policer_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->policer_items_arr);
+}
+
+static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
- err = devlink_trap_policers_register(devlink, trap->policers_arr,
- trap->policers_count);
+ err = mlxsw_sp_trap_policer_items_arr_init(mlxsw_sp);
if (err)
- goto err_trap_policers_register;
+ return err;
+
+ for (i = 0; i < trap->policers_count; i++) {
+ policer_item = &trap->policer_items_arr[i];
+ err = devlink_trap_policers_register(devlink,
+ &policer_item->policer, 1);
+ if (err)
+ goto err_trap_policer_register;
+ }
return 0;
-err_trap_policers_register:
- kfree(trap->policers_arr);
+err_trap_policer_register:
+ for (i--; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devlink_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
}
static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
- devlink_trap_policers_unregister(devlink, trap->policers_arr,
- trap->policers_count);
- WARN_ON(!list_empty(&trap->policer_item_list));
- kfree(trap->policers_arr);
+ for (i = trap->policers_count - 1; i >= 0; i--) {
+ policer_item = &trap->policer_items_arr[i];
+ devlink_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
+ }
+ mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
-int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
{
- size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ const struct mlxsw_sp_trap_group_item *group_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int err, i;
+
+ trap->group_items_arr = kmemdup(mlxsw_sp_trap_group_items_arr,
+ sizeof(mlxsw_sp_trap_group_items_arr),
+ GFP_KERNEL);
+ if (!trap->group_items_arr)
+ return -ENOMEM;
+
+ trap->groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+
+ for (i = 0; i < trap->groups_count; i++) {
+ group_item = &trap->group_items_arr[i];
+ err = devlink_trap_groups_register(devlink, &group_item->group,
+ 1);
+ if (err)
+ goto err_trap_group_register;
+ }
+
+ return 0;
+
+err_trap_group_register:
+ for (i--; i >= 0; i--) {
+ group_item = &trap->group_items_arr[i];
+ devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ kfree(trap->group_items_arr);
+ return err;
+}
+
+static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->groups_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_group_item *group_item;
+
+ group_item = &trap->group_items_arr[i];
+ devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ }
+ kfree(trap->group_items_arr);
+}
+
+static bool
+mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener)
+{
+ return listener->trap_id != 0;
+}
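
Each trap item carries a fixed-size listeners_arr, and slots the initializer does not mention stay zeroed; a trap_id of 0 therefore marks an unused slot, which is what the validity check above tests. A small illustration with hypothetical sizes and ids:

#include <stdio.h>

#define LISTENERS_MAX 2

struct listener { unsigned int trap_id; };

struct trap_item {
	struct listener listeners[LISTENERS_MAX]; /* tail stays zeroed */
};

static int listener_is_valid(const struct listener *l)
{
	return l->trap_id != 0;
}

int main(void)
{
	struct trap_item item = { .listeners = { { .trap_id = 0x8A } } };
	int i;

	for (i = 0; i < LISTENERS_MAX; i++)
		if (listener_is_valid(&item.listeners[i]))
			printf("slot %d active\n", i); /* only slot 0 */
	return 0;
}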
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ const struct mlxsw_sp_trap_item *trap_item;
+ int err, i;
+
+ trap->trap_items_arr = kmemdup(mlxsw_sp_trap_items_arr,
+ sizeof(mlxsw_sp_trap_items_arr),
+ GFP_KERNEL);
+ if (!trap->trap_items_arr)
+ return -ENOMEM;
+
+ trap->traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+
+ for (i = 0; i < trap->traps_count; i++) {
+ trap_item = &trap->trap_items_arr[i];
+ err = devlink_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ trap_item = &trap->trap_items_arr[i];
+ devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ kfree(trap->trap_items_arr);
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ int i;
+
+ for (i = trap->traps_count - 1; i >= 0; i--) {
+ const struct mlxsw_sp_trap_item *trap_item;
+
+ trap_item = &trap->trap_items_arr[i];
+ devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ }
+ kfree(trap->trap_items_arr);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
int err;
err = mlxsw_sp_trap_cpu_policers_set(mlxsw_sp);
@@ -428,59 +1267,52 @@ int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
- ARRAY_SIZE(mlxsw_sp_listeners_arr)))
- return -EINVAL;
-
err = mlxsw_sp_trap_policers_init(mlxsw_sp);
if (err)
return err;
- err = devlink_trap_groups_register(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
+ err = mlxsw_sp_trap_groups_init(mlxsw_sp);
if (err)
- goto err_trap_groups_register;
+ goto err_trap_groups_init;
- err = devlink_traps_register(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr), mlxsw_sp);
+ err = mlxsw_sp_traps_init(mlxsw_sp);
if (err)
- goto err_traps_register;
+ goto err_traps_init;
return 0;
-err_traps_register:
- devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
-err_trap_groups_register:
+err_traps_init:
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
+err_trap_groups_init:
mlxsw_sp_trap_policers_fini(mlxsw_sp);
return err;
}
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-
- devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr));
- devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
- groups_count);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_trap_groups_fini(mlxsw_sp);
mlxsw_sp_trap_policers_fini(mlxsw_sp);
}
int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
int err;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
-
err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
if (err)
return err;
@@ -492,15 +1324,20 @@ int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return;
+
+ for (i = MLXSW_SP_TRAP_LISTENERS_MAX - 1; i >= 0; i--) {
const struct mlxsw_listener *listener;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
-
mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
}
}
@@ -509,16 +1346,23 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
enum devlink_trap_action action)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp_trap_item *trap_item;
int i;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ trap_item = mlxsw_sp_trap_item_lookup(mlxsw_sp, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
bool enabled;
int err;
- if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ listener = &trap_item->listeners_arr[i];
+ if (!mlxsw_sp_trap_listener_is_valid(listener))
continue;
- listener = &mlxsw_sp_listeners_arr[i];
+
switch (action) {
case DEVLINK_TRAP_ACTION_DROP:
enabled = false;
@@ -544,33 +1388,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
+ const struct mlxsw_sp_trap_group_item *group_item;
char htgt_pl[MLXSW_REG_HTGT_LEN];
- u8 priority, tc, group_id;
-
- switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- case DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS:
- group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS;
- priority = 0;
- tc = 1;
- break;
- default:
+
+ group_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, group->id);
+ if (WARN_ON(!group_item))
return -EINVAL;
- }
if (policer_id) {
struct mlxsw_sp_trap_policer_item *policer_item;
@@ -582,7 +1405,8 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
hw_policer_id = policer_item->hw_id;
}
- mlxsw_reg_htgt_pack(htgt_pl, group_id, hw_policer_id, priority, tc);
+ mlxsw_reg_htgt_pack(htgt_pl, group_item->hw_group_id, hw_policer_id,
+ group_item->priority, group_item->priority);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
@@ -602,10 +1426,10 @@ int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
}
-static struct mlxsw_sp_trap_policer_item *
-mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
+static int
+mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
{
- struct mlxsw_sp_trap_policer_item *policer_item;
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
u16 hw_id;
@@ -615,27 +1439,19 @@ mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
*/
hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
if (WARN_ON(hw_id == trap->max_policers))
- return ERR_PTR(-ENOBUFS);
-
- policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
- if (!policer_item)
- return ERR_PTR(-ENOMEM);
+ return -ENOBUFS;
__set_bit(hw_id, trap->policers_usage);
policer_item->hw_id = hw_id;
- policer_item->id = id;
- list_add_tail(&policer_item->list, &trap->policer_item_list);
- return policer_item;
+ return 0;
}
static void
mlxsw_sp_trap_policer_item_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_trap_policer_item *policer_item)
{
- list_del(&policer_item->list);
__clear_bit(policer_item->hw_id, mlxsw_sp->trap->policers_usage);
- kfree(policer_item);
}
static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
@@ -678,9 +1494,13 @@ int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_trap_policer_item *policer_item;
int err;
- policer_item = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer->id);
- if (IS_ERR(policer_item))
- return PTR_ERR(policer_item);
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ err = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer_item);
+ if (err)
+ return err;
err = __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
policer->init_rate,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
index 8c54897ba173..13ac412f4d53 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -8,17 +8,19 @@
#include <net/devlink.h>
struct mlxsw_sp_trap {
- struct devlink_trap_policer *policers_arr; /* Registered policers */
+ struct mlxsw_sp_trap_policer_item *policer_items_arr;
u64 policers_count; /* Number of registered policers */
- struct list_head policer_item_list;
+
+ struct mlxsw_sp_trap_group_item *group_items_arr;
+ u64 groups_count; /* Number of registered groups */
+
+ struct mlxsw_sp_trap_item *trap_items_arr;
+ u64 traps_count; /* Number of registered traps */
+
+ u16 thin_policer_hw_id;
+
u64 max_policers;
unsigned long policers_usage[]; /* Usage bitmap */
};
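
policers_usage[] is a C99 flexible array member, so the whole state, header plus bitmap, comes from a single allocation sized by max_policers. A sketch of that allocation pattern with generic names (the driver sizes it with the kernel's BITS_TO_LONGS()):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct trap_state {
	unsigned long long max_policers;
	unsigned long usage[]; /* flexible array member */
};

static struct trap_state *trap_state_alloc(unsigned long long max)
{
	size_t words = BITS_TO_LONGS(max);
	struct trap_state *st;

	/* One allocation covers the header and the bitmap tail. */
	st = calloc(1, sizeof(*st) + words * sizeof(unsigned long));
	if (st)
		st->max_policers = max;
	return st;
}

int main(void)
{
	struct trap_state *st = trap_state_alloc(100);

	if (st)
		printf("allocated for %llu policers\n", st->max_policers);
	free(st);
	return 0;
}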
-struct mlxsw_sp_trap_policer_item {
- u16 hw_id;
- u32 id;
- struct list_head list; /* Member of policer_item_list */
-};
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 2503f61db5fb..b438f5576e18 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1404,6 +1404,11 @@ err_port_module_info_get:
return err;
}
+enum {
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX = 1,
+ MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL = 2,
+};
+
#define MLXSW_SX_RXL(_trap_id) \
MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
false, SX2_RX, FORWARD)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index eaa521b7561b..28e60697d14e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,16 +55,19 @@ enum {
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
- MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B,
+ MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISEMENT = 0x8B,
MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C,
- MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D,
+ MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISEMENT = 0x8D,
MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E,
+ MLXSW_TRAP_ID_IPV4_DHCP = 0x8F,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
+ MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
+ MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index b9c4d48e28e4..09f35209d43d 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -38,6 +38,8 @@ config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
select MII
+ select CRC32
+ select EEPROM_93CX6
---help---
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index 6d8ac5527aef..5cc00d22c708 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -5,5 +5,7 @@
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
+ks8851-objs = ks8851_common.o ks8851_spi.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+ks8851_mll-objs = ks8851_common.o ks8851_par.o
obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 8f834aef8e32..2b319e451121 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -7,6 +7,11 @@
* KS8851 register definitions
*/
+#ifndef __KS8851_H__
+#define __KS8851_H__
+
+#include <linux/eeprom_93cx6.h>
+
#define KS_CCR 0x08
#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
#define CCR_EEPROM (1 << 9)
@@ -19,7 +24,7 @@
#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
/* MAC address registers */
-#define KS_MAR(_m) (0x15 - (_m))
+#define KS_MAR(_m) (0x14 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -300,3 +305,147 @@
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)
+
+/**
+ * struct ks8851_rxctrl - KS8851 driver rx control
+ * @mchash: Multicast hash-table data.
+ * @rxcr1: KS_RXCR1 register setting
+ * @rxcr2: KS_RXCR2 register setting
+ *
+ * Representation of the settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings. It
+ * is used to work out whether the receive settings have changed and to
+ * pass the new settings to the worker that will send the necessary
+ * commands.
+ */
+struct ks8851_rxctrl {
+ u16 mchash[4];
+ u16 rxcr1;
+ u16 rxcr2;
+};
+
+/**
+ * union ks8851_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks8851_tx_hdr {
+ u8 txb[6];
+ __le16 txw[3];
+};
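
The union gives byte-wise and 16-bit views of the same six header bytes, which is what lets the SPI path start the header at txb[1] while still filling txw[] with aligned 16-bit stores. A quick demonstration of the aliasing (plain uint16_t instead of __le16, and the printed bytes assume a little-endian host):

#include <stdint.h>
#include <stdio.h>

union tx_hdr {
	uint8_t txb[6];
	uint16_t txw[3]; /* __le16 in the driver */
};

int main(void)
{
	union tx_hdr h = { { 0 } };

	h.txb[1] = 0xC0; /* opcode byte, e.g. a TXFIFO write command */
	h.txw[1] = 0x0001; /* frame id word */
	h.txw[2] = 64; /* packet length word */
	/* Both views alias the same storage: */
	printf("fid bytes: %02x %02x\n", h.txb[2], h.txb[3]);
	return 0;
}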
+
+/**
+ * struct ks8851_net - KS8851 driver private data
+ * @netdev: The network device we're bound to
+ * @statelock: Lock on this structure for tx list.
+ * @mii: The MII state information for the mii calls.
+ * @rxctrl: RX settings for @rxctrl_work.
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @txq: Queue of packets for transmission.
+ * @txh: Space for generating packet TX header in DMA-able data
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
+ * @lock: Bus access lock callback
+ * @unlock: Bus access unlock callback
+ * @rdreg16: 16bit register read callback
+ * @wrreg16: 16bit register write callback
+ * @rdfifo: FIFO read callback
+ * @wrfifo: FIFO write callback
+ * @start_xmit: start_xmit() implementation callback
+ * @rx_skb: rx_skb() implementation callback
+ * @flush_tx_work: flush_tx_work() implementation callback
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ * We align the buffers we may use for rx/tx to ensure that if the SPI driver
+ * wants to DMA map them, it will not have any problems with data the driver
+ * modifies.
+ */
+struct ks8851_net {
+ struct net_device *netdev;
+ spinlock_t statelock;
+
+ union ks8851_tx_hdr txh ____cacheline_aligned;
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+ u16 tx_space;
+ u8 fid;
+
+ u16 rc_ier;
+ u16 rc_rxqcr;
+ u16 rc_ccr;
+
+ struct mii_if_info mii;
+ struct ks8851_rxctrl rxctrl;
+
+ struct work_struct rxctrl_work;
+
+ struct sk_buff_head txq;
+
+ struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
+ struct regulator *vdd_io;
+ int gpio;
+
+ void (*lock)(struct ks8851_net *ks,
+ unsigned long *flags);
+ void (*unlock)(struct ks8851_net *ks,
+ unsigned long *flags);
+ unsigned int (*rdreg16)(struct ks8851_net *ks,
+ unsigned int reg);
+ void (*wrreg16)(struct ks8851_net *ks,
+ unsigned int reg, unsigned int val);
+ void (*rdfifo)(struct ks8851_net *ks, u8 *buff,
+ unsigned int len);
+ void (*wrfifo)(struct ks8851_net *ks,
+ struct sk_buff *txp, bool irq);
+ netdev_tx_t (*start_xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+ void (*rx_skb)(struct ks8851_net *ks,
+ struct sk_buff *skb);
+ void (*flush_tx_work)(struct ks8851_net *ks);
+};
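
The struct now ends in a table of bus operations; the SPI and parallel-bus front ends fill these pointers in at probe time and the shared code only ever calls through them. A self-contained illustration of the wiring, with a fake in-memory "bus" standing in for real SPI or parallel accessors:

#include <stdio.h>

struct ks { /* stand-in for struct ks8851_net */
	unsigned int (*rdreg16)(struct ks *ks, unsigned int reg);
	void (*wrreg16)(struct ks *ks, unsigned int reg, unsigned int val);
	unsigned int regs[256];
};

/* Fake backend; a real one would issue SPI or parallel-bus cycles. */
static unsigned int fake_rdreg16(struct ks *ks, unsigned int reg)
{
	return ks->regs[reg & 0xff];
}

static void fake_wrreg16(struct ks *ks, unsigned int reg, unsigned int val)
{
	ks->regs[reg & 0xff] = val & 0xffff;
}

int main(void)
{
	struct ks ks = { .rdreg16 = fake_rdreg16, .wrreg16 = fake_wrreg16 };

	ks.wrreg16(&ks, 0x10, 0xabcd); /* common code goes via the ptrs */
	printf("%04x\n", ks.rdreg16(&ks, 0x10));
	return 0;
}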
+
+int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ int msg_en);
+int ks8851_remove_common(struct device *dev);
+int ks8851_suspend(struct device *dev);
+int ks8851_resume(struct device *dev);
+
+static __maybe_unused SIMPLE_DEV_PM_OPS(ks8851_pm_ops,
+ ks8851_suspend, ks8851_resume);
+
+/**
+ * ks8851_done_tx - update and then free skbuff after transmitting
+ * @ks: The device state
+ * @txb: The buffer transmitted
+ */
+static void __maybe_unused ks8851_done_tx(struct ks8851_net *ks,
+ struct sk_buff *txb)
+{
+ struct net_device *dev = ks->netdev;
+
+ dev->stats.tx_bytes += txb->len;
+ dev->stats.tx_packets++;
+
+ dev_kfree_skb(txb);
+}
+
+#endif /* __KS8851_H__ */
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 33305c9c5a62..d65872172229 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -19,10 +19,8 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/eeprom_93cx6.h>
#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
@@ -30,255 +28,41 @@
#include "ks8851.h"
/**
- * struct ks8851_rxctrl - KS8851 driver rx control
- * @mchash: Multicast hash-table data.
- * @rxcr1: KS_RXCR1 register setting
- * @rxcr2: KS_RXCR2 register setting
- *
- * Representation of the settings needs to control the receive filtering
- * such as the multicast hash-filter and the receive register settings. This
- * is used to make the job of working out if the receive settings change and
- * then issuing the new settings to the worker that will send the necessary
- * commands.
- */
-struct ks8851_rxctrl {
- u16 mchash[4];
- u16 rxcr1;
- u16 rxcr2;
-};
-
-/**
- * union ks8851_tx_hdr - tx header data
- * @txb: The header as bytes
- * @txw: The header as 16bit, little-endian words
- *
- * A dual representation of the tx header data to allow
- * access to individual bytes, and to allow 16bit accesses
- * with 16bit alignment.
- */
-union ks8851_tx_hdr {
- u8 txb[6];
- __le16 txw[3];
-};
-
-/**
- * struct ks8851_net - KS8851 driver private data
- * @netdev: The network device we're bound to
- * @spidev: The spi device we're bound to.
- * @lock: Lock to ensure that the device is not accessed when busy.
- * @statelock: Lock on this structure for tx list.
- * @mii: The MII state information for the mii calls.
- * @rxctrl: RX settings for @rxctrl_work.
- * @tx_work: Work queue for tx packets
- * @rxctrl_work: Work queue for updating RX mode and multicast lists
- * @txq: Queue of packets for transmission.
- * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
- * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
- * @txh: Space for generating packet TX header in DMA-able data
- * @rxd: Space for receiving SPI data, in DMA-able space.
- * @txd: Space for transmitting SPI data, in DMA-able space.
- * @msg_enable: The message flags controlling driver output (see ethtool).
- * @fid: Incrementing frame id tag.
- * @rc_ier: Cached copy of KS_IER.
- * @rc_ccr: Cached copy of KS_CCR.
- * @rc_rxqcr: Cached copy of KS_RXQCR.
- * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
- * @vdd_reg: Optional regulator supplying the chip
- * @vdd_io: Optional digital power supply for IO
- * @gpio: Optional reset_n gpio
- *
- * The @lock ensures that the chip is protected when certain operations are
- * in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not ccessible until the transfer is finished and
- * the DMA has been de-asserted.
- *
- * The @statelock is used to protect information in the structure which may
- * need to be accessed via several sources, such as the network driver layer
- * or one of the work queues.
- *
- * We align the buffers we may use for rx/tx to ensure that if the SPI driver
- * wants to DMA map them, it will not have any problems with data the driver
- * modifies.
- */
-struct ks8851_net {
- struct net_device *netdev;
- struct spi_device *spidev;
- struct mutex lock;
- spinlock_t statelock;
-
- union ks8851_tx_hdr txh ____cacheline_aligned;
- u8 rxd[8];
- u8 txd[8];
-
- u32 msg_enable ____cacheline_aligned;
- u16 tx_space;
- u8 fid;
-
- u16 rc_ier;
- u16 rc_rxqcr;
- u16 rc_ccr;
-
- struct mii_if_info mii;
- struct ks8851_rxctrl rxctrl;
-
- struct work_struct tx_work;
- struct work_struct rxctrl_work;
-
- struct sk_buff_head txq;
-
- struct spi_message spi_msg1;
- struct spi_message spi_msg2;
- struct spi_transfer spi_xfer1;
- struct spi_transfer spi_xfer2[2];
-
- struct eeprom_93cx6 eeprom;
- struct regulator *vdd_reg;
- struct regulator *vdd_io;
- int gpio;
-};
-
-static int msg_enable;
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD (0x00)
-#define KS_SPIOP_WR (0x40)
-#define KS_SPIOP_RXFIFO (0x80)
-#define KS_SPIOP_TXFIFO (0xC0)
-
-/* shift for byte-enable data */
-#define BYTE_EN(_x) ((_x) << 2)
-
-/* turn register number and byte-enable mask into data for start of packet */
-#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6)
-
-/* SPI register read/write calls.
+ * ks8851_lock - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
*
- * All these calls issue SPI transactions to access the chip's registers. They
- * all require that the necessary lock is held to prevent accesses when the
- * chip is busy transferring packet data (RX/TX FIFO accesses).
+ * Claim chip register access lock
*/
+static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
+{
+ ks->lock(ks, flags);
+}
/**
- * ks8851_wrreg16 - write 16bit register value to chip
+ * ks8851_unlock - register access unlock
* @ks: The chip state
- * @reg: The register address
- * @val: The value to write
+ * @flags: Spinlock flags
*
- * Issue a write to put the value @val into the register specified in @reg.
+ * Release chip register access lock
*/
-static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
+static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
{
- struct spi_transfer *xfer = &ks->spi_xfer1;
- struct spi_message *msg = &ks->spi_msg1;
- __le16 txb[2];
- int ret;
-
- txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
- txb[1] = cpu_to_le16(val);
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 4;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "spi_sync() failed\n");
+ ks->unlock(ks, flags);
}
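
The lock/unlock wrappers just forward to the bus implementation; the flags pointer exists so a backend that needs IRQ-safe locking can thread spin_lock_irqsave() state through, while a sleeping bus can simply ignore it. A userspace analogue using a pthread mutex as the "ignores flags" case:

#include <pthread.h>
#include <stdio.h>

struct bus {
	pthread_mutex_t mtx;
	void (*lock)(struct bus *b, unsigned long *flags);
	void (*unlock)(struct bus *b, unsigned long *flags);
};

/* Mutex-style backend: the flags pointer is unused, the way a
 * sleeping (e.g. SPI) bus implementation would ignore IRQ flags. */
static void mtx_lock(struct bus *b, unsigned long *flags)
{
	(void)flags;
	pthread_mutex_lock(&b->mtx);
}

static void mtx_unlock(struct bus *b, unsigned long *flags)
{
	(void)flags;
	pthread_mutex_unlock(&b->mtx);
}

int main(void)
{
	struct bus b = { PTHREAD_MUTEX_INITIALIZER, mtx_lock, mtx_unlock };
	unsigned long flags;

	b.lock(&b, &flags);
	printf("register access under the bus lock\n");
	b.unlock(&b, &flags);
	return 0;
}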
/**
- * ks8851_wrreg8 - write 8bit register value to chip
+ * ks8851_wrreg16 - write 16bit register value to chip
* @ks: The chip state
* @reg: The register address
* @val: The value to write
*
* Issue a write to put the value @val into the register specified in @reg.
*/
-static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
-{
- struct spi_transfer *xfer = &ks->spi_xfer1;
- struct spi_message *msg = &ks->spi_msg1;
- __le16 txb[2];
- int ret;
- int bit;
-
- bit = 1 << (reg & 3);
-
- txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
- txb[1] = val;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 3;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "spi_sync() failed\n");
-}
-
-/**
- * ks8851_rdreg - issue read register command and return the data
- * @ks: The device state
- * @op: The register address and byte enables in message format.
- * @rxb: The RX buffer to return the result into
- * @rxl: The length of data expected.
- *
- * This is the low level read call that issues the necessary spi message(s)
- * to read data from the register specified in @op.
- */
-static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
- u8 *rxb, unsigned rxl)
-{
- struct spi_transfer *xfer;
- struct spi_message *msg;
- __le16 *txb = (__le16 *)ks->txd;
- u8 *trx = ks->rxd;
- int ret;
-
- txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
-
- if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
- msg = &ks->spi_msg2;
- xfer = ks->spi_xfer2;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 2;
-
- xfer++;
- xfer->tx_buf = NULL;
- xfer->rx_buf = trx;
- xfer->len = rxl;
- } else {
- msg = &ks->spi_msg1;
- xfer = &ks->spi_xfer1;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = trx;
- xfer->len = rxl + 2;
- }
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (ks->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
- memcpy(rxb, trx, rxl);
- else
- memcpy(rxb, trx + 2, rxl);
-}
-
-/**
- * ks8851_rdreg8 - read 8 bit register from device
- * @ks: The chip information
- * @reg: The register address
- *
- * Read a 8bit register from the chip, returning the result
-*/
-static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
+static void ks8851_wrreg16(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
{
- u8 rxb[1];
-
- ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
- return rxb[0];
+ ks->wrreg16(ks, reg, val);
}
/**
@@ -287,32 +71,11 @@ static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
* @reg: The register address
*
* Read a 16bit register from the chip, returning the result
-*/
-static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
-{
- __le16 rx = 0;
-
- ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
- return le16_to_cpu(rx);
-}
-
-/**
- * ks8851_rdreg32 - read 32 bit register from device
- * @ks: The chip information
- * @reg: The register address
- *
- * Read a 32bit register from the chip.
- *
- * Note, this read requires the address be aligned to 4 bytes.
-*/
-static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
+ */
+static unsigned int ks8851_rdreg16(struct ks8851_net *ks,
+ unsigned int reg)
{
- __le32 rx = 0;
-
- WARN_ON(reg & 3);
-
- ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
- return le32_to_cpu(rx);
+ return ks->rdreg16(ks, reg);
}
/**
@@ -368,21 +131,27 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
static int ks8851_write_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ u16 val;
int i;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/*
* Wake up chip in case it was powered off when stopped; otherwise,
* the first write to the MAC address does not take effect.
*/
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
- for (i = 0; i < ETH_ALEN; i++)
- ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ val = (dev->dev_addr[i] << 8) | dev->dev_addr[i + 1];
+ ks8851_wrreg16(ks, KS_MAR(i), val);
+ }
+
if (!netif_running(dev))
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
return 0;
}
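
The address is now programmed two bytes per write, byte i in the high half and byte i+1 in the low half, with KS_MAR() selecting the 16-bit register. Worked through for a hypothetical address 00:11:22:33:44:55:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6
#define KS_MAR(m) (0x14 - (m)) /* same mapping as the header above */

int main(void)
{
	const uint8_t addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		uint16_t val = (addr[i] << 8) | addr[i + 1];

		/* 0x0011 -> reg 0x14, 0x2233 -> 0x12, 0x4455 -> 0x10 */
		printf("wrreg16(0x%02x, 0x%04x)\n", KS_MAR(i), val);
	}
	return 0;
}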
@@ -396,19 +165,25 @@ static int ks8851_write_mac_addr(struct net_device *dev)
static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ u16 reg;
int i;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ reg = ks8851_rdreg16(ks, KS_MAR(i));
+ dev->dev_addr[i] = reg >> 8;
+ dev->dev_addr[i + 1] = reg & 0xff;
+ }
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
+ * @np: The device node pointer
*
* Get or create the initial mac address for the device and then set that
* into the station address register. A mac address supplied in the device
@@ -416,12 +191,12 @@ static void ks8851_read_mac_addr(struct net_device *dev)
* we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
-static void ks8851_init_mac(struct ks8851_net *ks)
+static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
{
struct net_device *dev = ks->netdev;
const u8 *mac_addr;
- mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+ mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
ks8851_write_mac_addr(dev);
@@ -442,48 +217,12 @@ static void ks8851_init_mac(struct ks8851_net *ks)
}
/**
- * ks8851_rdfifo - read data from the receive fifo
- * @ks: The device state.
- * @buff: The buffer address
- * @len: The length of the data to read
- *
- * Issue an RXQ FIFO read command and read the @len amount of data from
- * the FIFO into the buffer specified by @buff.
- */
-static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
-{
- struct spi_transfer *xfer = ks->spi_xfer2;
- struct spi_message *msg = &ks->spi_msg2;
- u8 txb[1];
- int ret;
-
- netif_dbg(ks, rx_status, ks->netdev,
- "%s: %d@%p\n", __func__, len, buff);
-
- /* set the operation we're issuing */
- txb[0] = KS_SPIOP_RXFIFO;
-
- xfer->tx_buf = txb;
- xfer->rx_buf = NULL;
- xfer->len = 1;
-
- xfer++;
- xfer->rx_buf = buff;
- xfer->tx_buf = NULL;
- xfer->len = len;
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
-}
-
-/**
* ks8851_dbg_dumpkkt - dump initial packet contents to debug
* @ks: The device state
* @rxpkt: The data for the received packet
*
* Dump the initial data from the packet to dev_dbg().
-*/
+ */
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
netdev_dbg(ks->netdev,
@@ -494,6 +233,16 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
+ * ks8851_rx_skb - receive skbuff
+ * @ks: The device state.
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ ks->rx_skb(ks, skb);
+}
+
+/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -507,10 +256,9 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
unsigned rxfc;
unsigned rxlen;
unsigned rxstat;
- u32 rxh;
u8 *rxpkt;
- rxfc = ks8851_rdreg8(ks, KS_RXFC);
+ rxfc = (ks8851_rdreg16(ks, KS_RXFCTR) >> 8) & 0xff;
netif_dbg(ks, rx_status, ks->netdev,
"%s: %d packets\n", __func__, rxfc);
@@ -526,9 +274,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
*/
for (; rxfc != 0; rxfc--) {
- rxh = ks8851_rdreg32(ks, KS_RXFHSR);
- rxstat = rxh & 0xffff;
- rxlen = (rxh >> 16) & 0xfff;
+ rxstat = ks8851_rdreg16(ks, KS_RXFHSR);
+ rxlen = ks8851_rdreg16(ks, KS_RXFHBCR) & RXFHBCR_CNT_MASK;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
@@ -557,13 +304,13 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxpkt = skb_put(skb, rxlen) - 8;
- ks8851_rdfifo(ks, rxpkt, rxalign + 8);
+ ks->rdfifo(ks, rxpkt, rxalign + 8);
if (netif_msg_pktdata(ks))
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx_ni(skb);
+ ks8851_rx_skb(ks, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -590,10 +337,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
- unsigned status;
unsigned handled = 0;
+ unsigned long flags;
+ unsigned int status;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -631,7 +379,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
handled |= IRQ_RXI;
if (status & IRQ_SPIBEI) {
- dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
+ netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
handled |= IRQ_SPIBEI;
}
@@ -662,7 +410,7 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
}
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
@@ -674,108 +422,13 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
}
/**
- * calc_txlen - calculate size of message to send packet
- * @len: Length of data
- *
- * Returns the size of the TXFIFO message needed to send
- * this packet.
- */
-static inline unsigned calc_txlen(unsigned len)
-{
- return ALIGN(len + 4, 4);
-}
-
-/**
- * ks8851_wrpkt - write packet to TX FIFO
- * @ks: The device state.
- * @txp: The sk_buff to transmit.
- * @irq: IRQ on completion of the packet.
- *
- * Send the @txp to the chip. This means creating the relevant packet header
- * specifying the length of the packet and the other information the chip
- * needs, such as IRQ on completion. Send the header and the packet data to
- * the device.
- */
-static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
-{
- struct spi_transfer *xfer = ks->spi_xfer2;
- struct spi_message *msg = &ks->spi_msg2;
- unsigned fid = 0;
- int ret;
-
- netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
-
- fid = ks->fid++;
- fid &= TXFR_TXFID_MASK;
-
- if (irq)
- fid |= TXFR_TXIC; /* irq on completion */
-
- /* start header at txb[1] to align txw entries */
- ks->txh.txb[1] = KS_SPIOP_TXFIFO;
- ks->txh.txw[1] = cpu_to_le16(fid);
- ks->txh.txw[2] = cpu_to_le16(txp->len);
-
- xfer->tx_buf = &ks->txh.txb[1];
- xfer->rx_buf = NULL;
- xfer->len = 5;
-
- xfer++;
- xfer->tx_buf = txp->data;
- xfer->rx_buf = NULL;
- xfer->len = ALIGN(txp->len, 4);
-
- ret = spi_sync(ks->spidev, msg);
- if (ret < 0)
- netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
-}
-
-/**
- * ks8851_done_tx - update and then free skbuff after transmitting
+ * ks8851_flush_tx_work - flush outstanding TX work
* @ks: The device state
- * @txb: The buffer transmitted
- */
-static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
-{
- struct net_device *dev = ks->netdev;
-
- dev->stats.tx_bytes += txb->len;
- dev->stats.tx_packets++;
-
- dev_kfree_skb(txb);
-}
-
-/**
- * ks8851_tx_work - process tx packet(s)
- * @work: The work strucutre what was scheduled.
- *
- * This is called when a number of packets have been scheduled for
- * transmission and need to be sent to the device.
*/
-static void ks8851_tx_work(struct work_struct *work)
+static void ks8851_flush_tx_work(struct ks8851_net *ks)
{
- struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
- struct sk_buff *txb;
- bool last = skb_queue_empty(&ks->txq);
-
- mutex_lock(&ks->lock);
-
- while (!last) {
- txb = skb_dequeue(&ks->txq);
- last = skb_queue_empty(&ks->txq);
-
- if (txb != NULL) {
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- ks8851_wrpkt(ks, txb, last);
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
-
- ks8851_done_tx(ks, txb);
- }
- }
-
- mutex_unlock(&ks->lock);
+ if (ks->flush_tx_work)
+ ks->flush_tx_work(ks);
}
/**
@@ -788,6 +441,7 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
@@ -800,7 +454,7 @@ static int ks8851_net_open(struct net_device *dev)
/* lock the card, even if we may not actually be doing anything
* else at the moment */
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
netif_dbg(ks, ifup, ks->netdev, "opening\n");
@@ -844,23 +498,14 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* clear then enable interrupts */
-
-#define STD_IRQ (IRQ_LCI | /* Link Change */ \
- IRQ_TXI | /* TX done */ \
- IRQ_RXI | /* RX done */ \
- IRQ_SPIBEI | /* SPI bus error */ \
- IRQ_TXPSI | /* TX process stop */ \
- IRQ_RXPSI) /* RX process stop */
-
- ks->rc_ier = STD_IRQ;
- ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
- ks8851_wrreg16(ks, KS_IER, STD_IRQ);
+ ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
+ ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
mii_check_link(&ks->mii);
return 0;
}
@@ -876,22 +521,23 @@ static int ks8851_net_open(struct net_device *dev)
static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* turn off the IRQs and ack any outstanding */
ks8851_wrreg16(ks, KS_IER, 0x0000);
ks8851_wrreg16(ks, KS_ISR, 0xffff);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
/* stop any outstanding work */
- flush_work(&ks->tx_work);
+ ks8851_flush_tx_work(ks);
flush_work(&ks->rxctrl_work);
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* shutdown RX process */
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
@@ -900,7 +546,7 @@ static int ks8851_net_stop(struct net_device *dev)
/* set powermode to soft power down to save power */
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
/* ensure any queued tx buffers are dumped */
while (!skb_queue_empty(&ks->txq)) {
@@ -934,26 +580,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned needed = calc_txlen(skb->len);
- netdev_tx_t ret = NETDEV_TX_OK;
-
- netif_dbg(ks, tx_queued, ks->netdev,
- "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
- spin_lock(&ks->statelock);
-
- if (needed > ks->tx_space) {
- netif_stop_queue(dev);
- ret = NETDEV_TX_BUSY;
- } else {
- ks->tx_space -= needed;
- skb_queue_tail(&ks->txq, skb);
- }
-
- spin_unlock(&ks->statelock);
- schedule_work(&ks->tx_work);
-
- return ret;
+ return ks->start_xmit(skb, dev);
}
/**
@@ -972,13 +600,14 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
static void ks8851_rxctrl_work(struct work_struct *work)
{
struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+ unsigned long flags;
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
/* need to shutdown RXQ before modifying filter parameters */
ks8851_wrreg16(ks, KS_RXCR1, 0x00);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
static void ks8851_set_rx_mode(struct net_device *dev)
@@ -1160,11 +789,6 @@ static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
*/
static int ks8851_eeprom_claim(struct ks8851_net *ks)
{
- if (!(ks->rc_ccr & CCR_EEPROM))
- return -ENOENT;
-
- mutex_lock(&ks->lock);
-
/* start with clock low, cs high */
ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
return 0;
@@ -1181,7 +805,6 @@ static void ks8851_eeprom_release(struct ks8851_net *ks)
unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
- mutex_unlock(&ks->lock);
}
#define KS_EEPROM_MAGIC (0x00008851)
@@ -1191,6 +814,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
+ unsigned long flags;
int len = ee->len;
u16 tmp;
@@ -1201,9 +825,13 @@ static int ks8851_set_eeprom(struct net_device *dev,
if (ee->magic != KS_EEPROM_MAGIC)
return -EINVAL;
- if (ks8851_eeprom_claim(ks))
+ if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
+ ks8851_lock(ks, &flags);
+
+ ks8851_eeprom_claim(ks);
+
eeprom_93cx6_wren(&ks->eeprom, true);
/* ethtool currently only supports writing bytes, which means
@@ -1223,6 +851,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
eeprom_93cx6_wren(&ks->eeprom, false);
ks8851_eeprom_release(ks);
+ ks8851_unlock(ks, &flags);
return 0;
}
@@ -1232,19 +861,25 @@ static int ks8851_get_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
+ unsigned long flags;
int len = ee->len;
/* must be 2 byte aligned */
if (len & 1 || offset & 1)
return -EINVAL;
- if (ks8851_eeprom_claim(ks))
+ if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
+ ks8851_lock(ks, &flags);
+
+ ks8851_eeprom_claim(ks);
+
ee->magic = KS_EEPROM_MAGIC;
eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
ks8851_eeprom_release(ks);
+ ks8851_unlock(ks, &flags);
return 0;
}
@@ -1318,6 +953,7 @@ static int ks8851_phy_reg(int reg)
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ksreg;
int result;
@@ -1325,9 +961,9 @@ static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
if (!ksreg)
return 0x0; /* no error return allowed, so use zero */
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
result = ks8851_rdreg16(ks, ksreg);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
return result;
}
@@ -1336,13 +972,14 @@ static void ks8851_phy_write(struct net_device *dev,
int phy, int reg, int value)
{
struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
int ksreg;
ksreg = ks8851_phy_reg(reg);
if (ksreg) {
- mutex_lock(&ks->lock);
+ ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
- mutex_unlock(&ks->lock);
+ ks8851_unlock(ks, &flags);
}
}
@@ -1382,7 +1019,7 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
#ifdef CONFIG_PM_SLEEP
-static int ks8851_suspend(struct device *dev)
+int ks8851_suspend(struct device *dev)
{
struct ks8851_net *ks = dev_get_drvdata(dev);
struct net_device *netdev = ks->netdev;
@@ -1395,7 +1032,7 @@ static int ks8851_suspend(struct device *dev)
return 0;
}
-static int ks8851_resume(struct device *dev)
+int ks8851_resume(struct device *dev)
{
struct ks8851_net *ks = dev_get_drvdata(dev);
struct net_device *netdev = ks->netdev;
@@ -1409,46 +1046,32 @@ static int ks8851_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume);
-
-static int ks8851_probe(struct spi_device *spi)
+int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ int msg_en)
{
- struct net_device *ndev;
- struct ks8851_net *ks;
- int ret;
+ struct ks8851_net *ks = netdev_priv(netdev);
unsigned cider;
int gpio;
+ int ret;
- ndev = alloc_etherdev(sizeof(struct ks8851_net));
- if (!ndev)
- return -ENOMEM;
-
- spi->bits_per_word = 8;
-
- ks = netdev_priv(ndev);
-
- ks->netdev = ndev;
- ks->spidev = spi;
+ ks->netdev = netdev;
ks->tx_space = 6144;
- gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
- 0, NULL);
- if (gpio == -EPROBE_DEFER) {
- ret = gpio;
- goto err_gpio;
- }
+ gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
+ if (gpio == -EPROBE_DEFER)
+ return gpio;
ks->gpio = gpio;
if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(&spi->dev, gpio,
+ ret = devm_gpio_request_one(dev, gpio,
GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
if (ret) {
- dev_err(&spi->dev, "reset gpio request failed\n");
- goto err_gpio;
+ dev_err(dev, "reset gpio request failed\n");
+ return ret;
}
}
- ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+ ks->vdd_io = devm_regulator_get(dev, "vdd-io");
if (IS_ERR(ks->vdd_io)) {
ret = PTR_ERR(ks->vdd_io);
goto err_reg_io;
@@ -1456,12 +1079,11 @@ static int ks8851_probe(struct spi_device *spi)
ret = regulator_enable(ks->vdd_io);
if (ret) {
- dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
- ret);
+ dev_err(dev, "regulator vdd_io enable fail: %d\n", ret);
goto err_reg_io;
}
- ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+ ks->vdd_reg = devm_regulator_get(dev, "vdd");
if (IS_ERR(ks->vdd_reg)) {
ret = PTR_ERR(ks->vdd_reg);
goto err_reg;
@@ -1469,8 +1091,7 @@ static int ks8851_probe(struct spi_device *spi)
ret = regulator_enable(ks->vdd_reg);
if (ret) {
- dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
- ret);
+ dev_err(dev, "regulator vdd enable fail: %d\n", ret);
goto err_reg;
}
@@ -1479,54 +1100,41 @@ static int ks8851_probe(struct spi_device *spi)
gpio_set_value(gpio, 1);
}
- mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
- INIT_WORK(&ks->tx_work, ks8851_tx_work);
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
- /* initialise pre-made spi transfer messages */
-
- spi_message_init(&ks->spi_msg1);
- spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
-
- spi_message_init(&ks->spi_msg2);
- spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
- spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
-
/* setup EEPROM state */
-
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
ks->eeprom.register_read = ks8851_eeprom_regread;
ks->eeprom.register_write = ks8851_eeprom_regwrite;
/* setup mii state */
- ks->mii.dev = ndev;
+ ks->mii.dev = netdev;
ks->mii.phy_id = 1,
ks->mii.phy_id_mask = 1;
ks->mii.reg_num_mask = 0xf;
ks->mii.mdio_read = ks8851_phy_read;
ks->mii.mdio_write = ks8851_phy_write;
- dev_info(&spi->dev, "message enable is %d\n", msg_enable);
+ dev_info(dev, "message enable is %d\n", msg_en);
/* set the default message enable */
- ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
- NETIF_MSG_PROBE |
- NETIF_MSG_LINK));
+ ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
skb_queue_head_init(&ks->txq);
- ndev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(ndev, &spi->dev);
+ netdev->ethtool_ops = &ks8851_ethtool_ops;
+ SET_NETDEV_DEV(netdev, dev);
- spi_set_drvdata(spi, ks);
+ dev_set_drvdata(dev, ks);
netif_carrier_off(ks->netdev);
- ndev->if_port = IF_PORT_100BASET;
- ndev->netdev_ops = &ks8851_netdev_ops;
- ndev->irq = spi->irq;
+ netdev->if_port = IF_PORT_100BASET;
+ netdev->netdev_ops = &ks8851_netdev_ops;
/* issue a global soft reset to reset the device. */
ks8851_soft_reset(ks, GRR_GSR);
@@ -1534,7 +1142,7 @@ static int ks8851_probe(struct spi_device *spi)
/* simple check for a valid chip being connected to the bus */
cider = ks8851_rdreg16(ks, KS_CIDER);
if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
- dev_err(&spi->dev, "failed to read device ID\n");
+ dev_err(dev, "failed to read device ID\n");
ret = -ENODEV;
goto err_id;
}
@@ -1543,16 +1151,16 @@ static int ks8851_probe(struct spi_device *spi)
ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
ks8851_read_selftest(ks);
- ks8851_init_mac(ks);
+ ks8851_init_mac(ks, dev->of_node);
- ret = register_netdev(ndev);
+ ret = register_netdev(netdev);
if (ret) {
- dev_err(&spi->dev, "failed to register network device\n");
+ dev_err(dev, "failed to register network device\n");
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
- CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
+ netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
+ CIDER_REV_GET(cider), netdev->dev_addr, netdev->irq,
ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
@@ -1565,49 +1173,21 @@ err_id:
err_reg:
regulator_disable(ks->vdd_io);
err_reg_io:
-err_gpio:
- free_netdev(ndev);
return ret;
}
-static int ks8851_remove(struct spi_device *spi)
+int ks8851_remove_common(struct device *dev)
{
- struct ks8851_net *priv = spi_get_drvdata(spi);
+ struct ks8851_net *priv = dev_get_drvdata(dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove\n");
+ dev_info(dev, "remove\n");
unregister_netdev(priv->netdev);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
- free_netdev(priv->netdev);
return 0;
}
-
-static const struct of_device_id ks8851_match_table[] = {
- { .compatible = "micrel,ks8851" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ks8851_match_table);
-
-static struct spi_driver ks8851_driver = {
- .driver = {
- .name = "ks8851",
- .of_match_table = ks8851_match_table,
- .pm = &ks8851_pm_ops,
- },
- .probe = ks8851_probe,
- .remove = ks8851_remove,
-};
-module_spi_driver(ks8851_driver);
-
-MODULE_DESCRIPTION("KS8851 Network driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
-
-module_param_named(message, msg_enable, int, 0);
-MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
-MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
deleted file mode 100644
index 45cc840d8e2e..000000000000
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ /dev/null
@@ -1,1393 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**
- * drivers/net/ethernet/micrel/ks8851_mll.c
- * Copyright (c) 2009 Micrel Inc.
- */
-
-/* Supports:
- * KS8851 16bit MLL chip from Micrel Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/cache.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/mii.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/ks8851_mll.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_net.h>
-
-#include "ks8851.h"
-
-#define DRV_NAME "ks8851_mll"
-
-static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
-#define MAX_RECV_FRAMES 255
-#define MAX_BUF_SIZE 2048
-#define TX_BUF_SIZE 2000
-#define RX_BUF_SIZE 2000
-
-#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
- RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
-
-#define ENUM_BUS_NONE 0
-#define ENUM_BUS_8BIT 1
-#define ENUM_BUS_16BIT 2
-#define ENUM_BUS_32BIT 3
-
-#define MAX_MCAST_LST 32
-#define HW_MCAST_SIZE 8
-
-/**
- * union ks_tx_hdr - tx header data
- * @txb: The header as bytes
- * @txw: The header as 16bit, little-endian words
- *
- * A dual representation of the tx header data to allow
- * access to individual bytes, and to allow 16bit accesses
- * with 16bit alignment.
- */
-union ks_tx_hdr {
- u8 txb[4];
- __le16 txw[2];
-};
-
-/**
- * struct ks_net - KS8851 driver private data
- * @net_device : The network device we're bound to
- * @hw_addr : start address of data register.
- * @hw_addr_cmd : start address of command register.
- * @txh : temporaly buffer to save status/length.
- * @lock : Lock to ensure that the device is not accessed when busy.
- * @pdev : Pointer to platform device.
- * @mii : The MII state information for the mii calls.
- * @frame_head_info : frame header information for multi-pkt rx.
- * @statelock : Lock on this structure for tx list.
- * @msg_enable : The message flags controlling driver output (see ethtool).
- * @frame_cnt : number of frames received.
- * @bus_width : i/o bus width.
- * @rc_rxqcr : Cached copy of KS_RXQCR.
- * @rc_txcr : Cached copy of KS_TXCR.
- * @rc_ier : Cached copy of KS_IER.
- * @sharedbus : Multipex(addr and data bus) mode indicator.
- * @cmd_reg_cache : command register cached.
- * @cmd_reg_cache_int : command register cached. Used in the irq handler.
- * @promiscuous : promiscuous mode indicator.
- * @all_mcast : mutlicast indicator.
- * @mcast_lst_size : size of multicast list.
- * @mcast_lst : multicast list.
- * @mcast_bits : multicast enabed.
- * @mac_addr : MAC address assigned to this device.
- * @fid : frame id.
- * @extra_byte : number of extra byte prepended rx pkt.
- * @enabled : indicator this device works.
- *
- * The @lock ensures that the chip is protected when certain operations are
- * in progress. When the read or write packet transfer is in progress, most
- * of the chip registers are not accessible until the transfer is finished and
- * the DMA has been de-asserted.
- *
- * The @statelock is used to protect information in the structure which may
- * need to be accessed via several sources, such as the network driver layer
- * or one of the work queues.
- *
- */
-
-/* Receive multiplex framer header info */
-struct type_frame_head {
- u16 sts; /* Frame status */
- u16 len; /* Byte count */
-};
-
-struct ks_net {
- struct net_device *netdev;
- void __iomem *hw_addr;
- void __iomem *hw_addr_cmd;
- union ks_tx_hdr txh ____cacheline_aligned;
- struct mutex lock; /* spinlock to be interrupt safe */
- struct platform_device *pdev;
- struct mii_if_info mii;
- struct type_frame_head *frame_head_info;
- spinlock_t statelock;
- u32 msg_enable;
- u32 frame_cnt;
- int bus_width;
-
- u16 rc_rxqcr;
- u16 rc_txcr;
- u16 rc_ier;
- u16 sharedbus;
- u16 cmd_reg_cache;
- u16 cmd_reg_cache_int;
- u16 promiscuous;
- u16 all_mcast;
- u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
- u8 mcast_bits[HW_MCAST_SIZE];
- u8 mac_addr[6];
- u8 fid;
- u8 extra_byte;
- u8 enabled;
-};
-
-static int msg_enable;
-
-#define BE3 0x8000 /* Byte Enable 3 */
-#define BE2 0x4000 /* Byte Enable 2 */
-#define BE1 0x2000 /* Byte Enable 1 */
-#define BE0 0x1000 /* Byte Enable 0 */
-
-/* register read/write calls.
- *
- * All these calls issue transactions to access the chip's registers. They
- * all require that the necessary lock is held to prevent accesses when the
- * chip is busy transferring packet data (RX/TX FIFO accesses).
- */
-
-/**
- * ks_check_endian - Check whether endianness of the bus is correct
- * @ks : The chip information
- *
- * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
- * bus. To maintain optimum performance, the bus endianness should be set
- * such that it matches the endianness of the CPU.
- */
-
-static int ks_check_endian(struct ks_net *ks)
-{
- u16 cider;
-
- /*
- * Read CIDER register first, however read it the "wrong" way around.
- * If the endian strap on the KS8851-16MLL is incorrect and the chip
- * is operating in different endianness than the CPU, then the meaning
- * of BE[3:0] byte-enable bits is also swapped such that:
- * BE[3,2,1,0] becomes BE[1,0,3,2]
- *
- * Luckily for us, the byte-enable bits are the top four MSbits of
- * the address register and the CIDER register is at offset 0xc0.
- * Hence, by reading address 0xc0c0, which is not impacted by endian
- * swapping, we assert either BE[3:2] or BE[1:0] while reading the
- * CIDER register.
- *
- * If the bus configuration is correct, reading 0xc0c0 asserts
- * BE[3:2] and this read returns 0x0000, because to read register
- * with bottom two LSbits of address set to 0, BE[1:0] must be
- * asserted.
- *
- * If the bus configuration is NOT correct, reading 0xc0c0 asserts
- * BE[1:0] and this read returns non-zero 0x8872 value.
- */
- iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
- cider = ioread16(ks->hw_addr);
- if (!cider)
- return 0;
-
- netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
-
- return -EINVAL;
-}
-
-/**
- * ks_rdreg16 - read 16 bit register from device
- * @ks : The chip information
- * @offset: The register address
- *
- * Read a 16bit register from the chip, returning the result
- */
-
-static u16 ks_rdreg16(struct ks_net *ks, int offset)
-{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- return ioread16(ks->hw_addr);
-}
-
-/**
- * ks_wrreg16 - write 16bit register value to chip
- * @ks: The chip information
- * @offset: The register address
- * @value: The value to write
- *
- */
-
-static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
-{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- iowrite16(value, ks->hw_addr);
-}
-
-/**
- * ks_inblk - read a block of data from QMU. This is called after pseudo-DMA mode is enabled.
- * @ks: The chip state
- * @wptr: buffer address to save data
- * @len: length in byte to read
- *
- */
-static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
-{
- len >>= 1;
- while (len--)
- *wptr++ = (u16)ioread16(ks->hw_addr);
-}
-
-/**
- * ks_outblk - write data to QMU. This is called after pseudo-DMA mode is enabled.
- * @ks: The chip information
- * @wptr: buffer address
- * @len: length in byte to write
- *
- */
-static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
-{
- len >>= 1;
- while (len--)
- iowrite16(*wptr++, ks->hw_addr);
-}
-
-static void ks_disable_int(struct ks_net *ks)
-{
- ks_wrreg16(ks, KS_IER, 0x0000);
-} /* ks_disable_int */
-
-static void ks_enable_int(struct ks_net *ks)
-{
- ks_wrreg16(ks, KS_IER, ks->rc_ier);
-} /* ks_enable_int */
-
-/**
- * ks_tx_fifo_space - return the available hardware buffer size.
- * @ks: The chip information
- *
- */
-static inline u16 ks_tx_fifo_space(struct ks_net *ks)
-{
- return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
-}
-
-/**
- * ks_save_cmd_reg - save the command register from the cache.
- * @ks: The chip information
- *
- */
-static inline void ks_save_cmd_reg(struct ks_net *ks)
-{
- /* The ks8851 MLL has a bug reading back the command register,
- * so rely on software to save the content of the command register.
- */
- ks->cmd_reg_cache_int = ks->cmd_reg_cache;
-}
-
-/**
- * ks_restore_cmd_reg - restore the command register from the cache and
- * write to hardware register.
- * @ks: The chip information
- *
- */
-static inline void ks_restore_cmd_reg(struct ks_net *ks)
-{
- ks->cmd_reg_cache = ks->cmd_reg_cache_int;
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
-}
-
-/**
- * ks_set_powermode - set power mode of the device
- * @ks: The chip information
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- ks_rdreg16(ks, KS_GRR);
- pmecr = ks_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
- * ks_read_config - read chip configuration of bus width.
- * @ks: The chip information
- *
- */
-static void ks_read_config(struct ks_net *ks)
-{
- u16 reg_data = 0;
-
- /* Regardless of bus width, 8 bit read should always work.*/
- reg_data = ks_rdreg16(ks, KS_CCR);
-
- /* addr/data bus are multiplexed */
- ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
-
- /* Garbage data may precede the frame data when reading from the QMU,
- * depending on the bus width.
- */
-
- if (reg_data & CCR_8BIT) {
- ks->bus_width = ENUM_BUS_8BIT;
- ks->extra_byte = 1;
- } else if (reg_data & CCR_16BIT) {
- ks->bus_width = ENUM_BUS_16BIT;
- ks->extra_byte = 2;
- } else {
- ks->bus_width = ENUM_BUS_32BIT;
- ks->extra_byte = 4;
- }
-}
-
-/**
- * ks_soft_reset - issue one of the soft reset to the device
- * @ks: The device state.
- * @op: The bit(s) to set in the GRR
- *
- * Issue the relevant soft-reset command to the device's GRR register
- * specified by @op.
- *
- * Note, the delays are in there as a caution to ensure that the reset
- * has time to take effect and then complete. Since the datasheet does
- * not currently specify the exact sequence, we have chosen something
- * that seems to work with our device.
- */
-static void ks_soft_reset(struct ks_net *ks, unsigned op)
-{
- /* Disable interrupt first */
- ks_wrreg16(ks, KS_IER, 0x0000);
- ks_wrreg16(ks, KS_GRR, op);
- mdelay(10); /* wait a short time to effect reset */
- ks_wrreg16(ks, KS_GRR, 0);
- mdelay(1); /* wait for condition to clear */
-}
-
-
-static void ks_enable_qmu(struct ks_net *ks)
-{
- u16 w;
-
- w = ks_rdreg16(ks, KS_TXCR);
- /* Enables QMU Transmit (TXCR). */
- ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
-
- /*
- * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
- * Enable
- */
-
- w = ks_rdreg16(ks, KS_RXQCR);
- ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
-
- /* Enables QMU Receive (RXCR1). */
- w = ks_rdreg16(ks, KS_RXCR1);
- ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
- ks->enabled = true;
-} /* ks_enable_qmu */
-
-static void ks_disable_qmu(struct ks_net *ks)
-{
- u16 w;
-
- w = ks_rdreg16(ks, KS_TXCR);
-
- /* Disables QMU Transmit (TXCR). */
- w &= ~TXCR_TXE;
- ks_wrreg16(ks, KS_TXCR, w);
-
- /* Disables QMU Receive (RXCR1). */
- w = ks_rdreg16(ks, KS_RXCR1);
- w &= ~RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, w);
-
- ks->enabled = false;
-
-} /* ks_disable_qmu */
-
-/**
- * ks_read_qmu - read 1 pkt data from the QMU.
- * @ks: The chip information
- * @buf: buffer address to save 1 pkt
- * @len: Pkt length
- * Here is the sequence to read 1 pkt:
- * 1. set pseudo-DMA mode
- * 2. read prepend data
- * 3. read pkt data
- * 4. reset pseudo-DMA mode
- */
-static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
-{
- u32 r = ks->extra_byte & 0x1 ;
- u32 w = ks->extra_byte - r;
-
- /* 1. set pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
-
- /* 2. read prepend data */
- /* read 4 + extra bytes and discard them: the extra bytes are
-  * dummies, then 2 bytes for status and 2 for length
-  */
-
- /* the odd extra byte, if any, is read with a single 8 bit access */
- if (unlikely(r))
- ioread8(ks->hw_addr);
- ks_inblk(ks, buf, w + 2 + 2);
-
- /* 3. read pkt data */
- ks_inblk(ks, buf, ALIGN(len, 4));
-
- /* 4. reset pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
-}
-
-/**
- * ks_rcv - read multiple pkts data from the QMU.
- * @ks: The chip information
- * @netdev: The network device being opened.
- *
- * Read all of the header information before reading the packet content.
- * Leaving only part of the packets in the QMU after issuing the
- * interrupt ack is not allowed.
- */
-static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
-{
- u32 i;
- struct type_frame_head *frame_hdr = ks->frame_head_info;
- struct sk_buff *skb;
-
- ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
-
- /* read all header information */
- for (i = 0; i < ks->frame_cnt; i++) {
- /* Checking Received packet status */
- frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
- /* Get packet len from hardware */
- frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
- frame_hdr++;
- }
-
- frame_hdr = ks->frame_head_info;
- while (ks->frame_cnt--) {
- if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
- frame_hdr->len >= RX_BUF_SIZE ||
- frame_hdr->len <= 0)) {
-
- /* discard an invalid packet */
- ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- netdev->stats.rx_dropped++;
- if (!(frame_hdr->sts & RXFSHR_RXFV))
- netdev->stats.rx_frame_errors++;
- else
- netdev->stats.rx_length_errors++;
- frame_hdr++;
- continue;
- }
-
- skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
- if (likely(skb)) {
- skb_reserve(skb, 2);
- /* read data block including CRC 4 bytes */
- ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
- skb_put(skb, frame_hdr->len - 4);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- /* exclude CRC size */
- netdev->stats.rx_bytes += frame_hdr->len - 4;
- netdev->stats.rx_packets++;
- } else {
- ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- netdev->stats.rx_dropped++;
- }
- frame_hdr++;
- }
-}
-
-/**
- * ks_update_link_status - link status update.
- * @netdev: The network device being opened.
- * @ks: The chip information
- *
- */
-
-static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
-{
- /* check the status of the link */
- u32 link_up_status;
- if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
- netif_carrier_on(netdev);
- link_up_status = true;
- } else {
- netif_carrier_off(netdev);
- link_up_status = false;
- }
- netif_dbg(ks, link, ks->netdev,
- "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
-}
-
-/**
- * ks_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ handler.
- * @pw: The private word passed to request_irq(), our struct net_device.
- *
- * This is the handler invoked to find out what happened
- *
- * Read the interrupt status, work out what needs to be done and then clear
- * any of the interrupts that are not needed.
- */
-
-static irqreturn_t ks_irq(int irq, void *pw)
-{
- struct net_device *netdev = pw;
- struct ks_net *ks = netdev_priv(netdev);
- unsigned long flags;
- u16 status;
-
- spin_lock_irqsave(&ks->statelock, flags);
- /* this should be the first thing done in the IRQ handler */
- ks_save_cmd_reg(ks);
-
- status = ks_rdreg16(ks, KS_ISR);
- if (unlikely(!status)) {
- ks_restore_cmd_reg(ks);
- spin_unlock_irqrestore(&ks->statelock, flags);
- return IRQ_NONE;
- }
-
- ks_wrreg16(ks, KS_ISR, status);
-
- if (likely(status & IRQ_RXI))
- ks_rcv(ks, netdev);
-
- if (unlikely(status & IRQ_LCI))
- ks_update_link_status(netdev, ks);
-
- if (unlikely(status & IRQ_TXI))
- netif_wake_queue(netdev);
-
- if (unlikely(status & IRQ_LDI)) {
-
- u16 pmecr = ks_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_WKEVT_MASK;
- ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
- }
-
- if (unlikely(status & IRQ_RXOI))
- ks->netdev->stats.rx_over_errors++;
- /* this should be the last thing done in the IRQ handler */
- ks_restore_cmd_reg(ks);
- spin_unlock_irqrestore(&ks->statelock, flags);
- return IRQ_HANDLED;
-}
-
-
-/**
- * ks_net_open - open network device
- * @netdev: The network device being opened.
- *
- * Called when the network device is marked active, such as a user executing
- * 'ifconfig up' on the device.
- */
-static int ks_net_open(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int err;
-
-#define KS_INT_FLAGS IRQF_TRIGGER_LOW
- /* lock the card, even if we may not actually do anything
- * else at the moment.
- */
-
- netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
-
- /* reset the HW */
- err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
-
- if (err) {
- pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
- return err;
- }
-
- /* wake up powermode to normal mode */
- ks_set_powermode(ks, PMECR_PM_NORMAL);
- mdelay(1); /* wait for normal mode to take effect */
-
- ks_wrreg16(ks, KS_ISR, 0xffff);
- ks_enable_int(ks);
- ks_enable_qmu(ks);
- netif_start_queue(ks->netdev);
-
- netif_dbg(ks, ifup, ks->netdev, "network device up\n");
-
- return 0;
-}
-
-/**
- * ks_net_stop - close network device
- * @netdev: The device being closed.
- *
- * Called to close down a network device which has been active. Cancel any
- * work, shut down the RX and TX process and then place the chip into a low
- * power state whilst it is not being used.
- */
-static int ks_net_stop(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- netif_info(ks, ifdown, netdev, "shutting down\n");
-
- netif_stop_queue(netdev);
-
- mutex_lock(&ks->lock);
-
- /* turn off the IRQs and ack any outstanding */
- ks_wrreg16(ks, KS_IER, 0x0000);
- ks_wrreg16(ks, KS_ISR, 0xffff);
-
- /* shutdown RX/TX QMU */
- ks_disable_qmu(ks);
- ks_disable_int(ks);
-
- /* set powermode to soft power down to save power */
- ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
- free_irq(netdev->irq, netdev);
- mutex_unlock(&ks->lock);
- return 0;
-}
-
-
-/**
- * ks_write_qmu - write 1 pkt data to the QMU.
- * @ks: The chip information
- * @pdata: buffer address to save 1 pkt
- * @len: Pkt length in byte
- * Here is the sequence to write 1 pkt:
- * 1. set pseudo-DMA mode
- * 2. write status/length
- * 3. write pkt data
- * 4. reset pseudo-DMA mode
- * 5. enqueue the packet into the TXQ
- * 6. wait until the packet is out
- */
-static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
-{
- /* start header at txb[0] to align txw entries */
- ks->txh.txw[0] = 0;
- ks->txh.txw[1] = cpu_to_le16(len);
-
- /* 1. set pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
- /* 2. write status/length info */
- ks_outblk(ks, ks->txh.txw, 4);
- /* 3. write pkt data */
- ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
- /* 4. reset pseudo-DMA mode */
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
- /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
- ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
- /* 6. wait until TXQCR_METFE is auto-cleared */
- while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
- ;
-}
-
-/**
- * ks_start_xmit - transmit packet
- * @skb : The buffer to transmit
- * @netdev : The device used to transmit the packet.
- *
- * Called by the network layer to transmit the @skb.
- * spin_lock_irqsave is required because tx and rx must be mutually
- * exclusive, so while tx is in progress, prevent the IRQ from happening.
- */
-static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- netdev_tx_t retv = NETDEV_TX_OK;
- struct ks_net *ks = netdev_priv(netdev);
- unsigned long flags;
-
- spin_lock_irqsave(&ks->statelock, flags);
-
- /* Extra space is required:
- * 4 bytes for alignment, 4 for status/length, 4 for CRC
- */
-
- if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
- ks_write_qmu(ks, skb->data, skb->len);
- /* add tx statistics */
- netdev->stats.tx_bytes += skb->len;
- netdev->stats.tx_packets++;
- dev_kfree_skb(skb);
- } else
- retv = NETDEV_TX_BUSY;
- spin_unlock_irqrestore(&ks->statelock, flags);
- return retv;
-}
-
-/**
- * ks_start_rx - ready to serve pkts
- * @ks : The chip information
- *
- */
-static void ks_start_rx(struct ks_net *ks)
-{
- u16 cntl;
-
- /* Enables QMU Receive (RXCR1). */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl |= RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, cntl);
-} /* ks_start_rx */
-
-/**
- * ks_stop_rx - stop to serve pkts
- * @ks : The chip information
- *
- */
-static void ks_stop_rx(struct ks_net *ks)
-{
- u16 cntl;
-
- /* Disables QMU Receive (RXCR1). */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl &= ~RXCR1_RXE ;
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
-} /* ks_stop_rx */
-
-static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
-
-static unsigned long ether_gen_crc(int length, u8 *data)
-{
- long crc = -1;
- while (--length >= 0) {
- u8 current_octet = *data++;
- int bit;
-
- for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ?
- ethernet_polynomial : 0);
- }
- }
- return (unsigned long)crc;
-} /* ether_gen_crc */
-
-/**
- * ks_set_grpaddr - set multicast information
- * @ks : The chip information
- */
-
-static void ks_set_grpaddr(struct ks_net *ks)
-{
- u8 i;
- u32 index, position, value;
-
- memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
-
- for (i = 0; i < ks->mcast_lst_size; i++) {
- position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
- index = position >> 3;
- value = 1 << (position & 7);
- ks->mcast_bits[index] |= (u8)value;
- }
-
- for (i = 0; i < HW_MCAST_SIZE; i++) {
- if (i & 1) {
- ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
- (ks->mcast_bits[i] << 8) |
- ks->mcast_bits[i - 1]);
- }
- }
-} /* ks_set_grpaddr */
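For reference, the hash placement above can be reproduced standalone: the driver takes the top six bits of a big-endian CRC-32 of the MAC address, then splits that position into a byte index and a bit mask exactly as ks_set_grpaddr() does. A minimal userspace sketch (the MAC below is just an illustrative multicast address; the unsigned arithmetic matches ether_gen_crc() bit for bit on typical compilers with 32-bit long):

#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY_BE 0x04c11db7

/* bitwise big-endian CRC-32, the unsigned twin of ether_gen_crc() above */
static uint32_t ether_crc_be(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? CRC32_POLY_BE : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t pos = (ether_crc_be(6, mac) >> 26) & 0x3f;

	/* the top six CRC bits select one of 64 filter bits, split into
	 * eight bytes of eight bits, as ks_set_grpaddr() does
	 */
	printf("table byte %u, bit mask 0x%02x\n", pos >> 3, 1u << (pos & 7));
	return 0;
}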
-
-/**
- * ks_clear_mcast - clear multicast information
- * @ks : The chip information
- *
- * This routine removes all mcast addresses set in the hardware.
- */
-
-static void ks_clear_mcast(struct ks_net *ks)
-{
- u16 i, mcast_size;
- for (i = 0; i < HW_MCAST_SIZE; i++)
- ks->mcast_bits[i] = 0;
-
- mcast_size = HW_MCAST_SIZE >> 2;
- for (i = 0; i < mcast_size; i++)
- ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
-}
-
-static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
-{
- u16 cntl;
- ks->promiscuous = promiscuous_mode;
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
- cntl = ks_rdreg16(ks, KS_RXCR1);
-
- cntl &= ~RXCR1_FILTER_MASK;
- if (promiscuous_mode)
- /* Enable Promiscuous mode */
- cntl |= RXCR1_RXAE | RXCR1_RXINVF;
- else
- /* Disable Promiscuous mode (default normal mode) */
- cntl |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
- if (ks->enabled)
- ks_start_rx(ks);
-
-} /* ks_set_promis */
-
-static void ks_set_mcast(struct ks_net *ks, u16 mcast)
-{
- u16 cntl;
-
- ks->all_mcast = mcast;
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
- cntl = ks_rdreg16(ks, KS_RXCR1);
- cntl &= ~RXCR1_FILTER_MASK;
- if (mcast)
- /* Enable "Perfect with Multicast address passed mode" */
- cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
- else
- /**
- * Disable "Perfect with Multicast address passed
- * mode" (normal mode).
- */
- cntl |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, cntl);
-
- if (ks->enabled)
- ks_start_rx(ks);
-} /* ks_set_mcast */
-
-static void ks_set_rx_mode(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- struct netdev_hw_addr *ha;
-
- /* Turn on/off promiscuous mode. */
- if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
- ks_set_promis(ks,
- (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
- /* Turn on/off all mcast mode. */
- else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
- ks_set_mcast(ks,
- (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
- else
- ks_set_promis(ks, false);
-
- if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
- if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
- int i = 0;
-
- netdev_for_each_mc_addr(ha, netdev) {
- if (i >= MAX_MCAST_LST)
- break;
- memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
- }
- ks->mcast_lst_size = (u8)i;
- ks_set_grpaddr(ks);
- } else {
- /**
- * List too big to support so
- * turn on all mcast mode.
- */
- ks->mcast_lst_size = MAX_MCAST_LST;
- ks_set_mcast(ks, true);
- }
- } else {
- ks->mcast_lst_size = 0;
- ks_clear_mcast(ks);
- }
-} /* ks_set_rx_mode */
-
-static void ks_set_mac(struct ks_net *ks, u8 *data)
-{
- u16 *pw = (u16 *)data;
- u16 w, u;
-
- ks_stop_rx(ks); /* Stop receiving for reconfiguration */
-
- u = *pw++;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARH, w);
-
- u = *pw++;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARM, w);
-
- u = *pw;
- w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
- ks_wrreg16(ks, KS_MARL, w);
-
- memcpy(ks->mac_addr, data, ETH_ALEN);
-
- if (ks->enabled)
- ks_start_rx(ks);
-}
-
-static int ks_set_mac_address(struct net_device *netdev, void *paddr)
-{
- struct ks_net *ks = netdev_priv(netdev);
- struct sockaddr *addr = paddr;
- u8 *da;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- da = (u8 *)netdev->dev_addr;
-
- ks_set_mac(ks, da);
- return 0;
-}
-
-static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
-}
-
-static const struct net_device_ops ks_netdev_ops = {
- .ndo_open = ks_net_open,
- .ndo_stop = ks_net_stop,
- .ndo_do_ioctl = ks_net_ioctl,
- .ndo_start_xmit = ks_start_xmit,
- .ndo_set_mac_address = ks_set_mac_address,
- .ndo_set_rx_mode = ks_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* ethtool support */
-
-static void ks_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *di)
-{
- strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
- strlcpy(di->version, "1.00", sizeof(di->version));
- strlcpy(di->bus_info, dev_name(netdev->dev.parent),
- sizeof(di->bus_info));
-}
-
-static u32 ks_get_msglevel(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return ks->msg_enable;
-}
-
-static void ks_set_msglevel(struct net_device *netdev, u32 to)
-{
- struct ks_net *ks = netdev_priv(netdev);
- ks->msg_enable = to;
-}
-
-static int ks_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
-
- mii_ethtool_get_link_ksettings(&ks->mii, cmd);
-
- return 0;
-}
-
-static int ks_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
-}
-
-static u32 ks_get_link(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_link_ok(&ks->mii);
-}
-
-static int ks_nway_reset(struct net_device *netdev)
-{
- struct ks_net *ks = netdev_priv(netdev);
- return mii_nway_restart(&ks->mii);
-}
-
-static const struct ethtool_ops ks_ethtool_ops = {
- .get_drvinfo = ks_get_drvinfo,
- .get_msglevel = ks_get_msglevel,
- .set_msglevel = ks_set_msglevel,
- .get_link = ks_get_link,
- .nway_reset = ks_nway_reset,
- .get_link_ksettings = ks_get_link_ksettings,
- .set_link_ksettings = ks_set_link_ksettings,
-};
-
-/* MII interface controls */
-
-/**
- * ks_phy_reg - convert MII register into a KS8851 register
- * @reg: MII register number.
- *
- * Return the KS8851 register number for the corresponding MII PHY register
- * if possible. Return zero if the MII register has no direct mapping to the
- * KS8851 register set.
- */
-static int ks_phy_reg(int reg)
-{
- switch (reg) {
- case MII_BMCR:
- return KS_P1MBCR;
- case MII_BMSR:
- return KS_P1MBSR;
- case MII_PHYSID1:
- return KS_PHY1ILR;
- case MII_PHYSID2:
- return KS_PHY1IHR;
- case MII_ADVERTISE:
- return KS_P1ANAR;
- case MII_LPA:
- return KS_P1ANLPR;
- }
-
- return 0x0;
-}
-
-/**
- * ks_phy_read - MII interface PHY register read.
- * @netdev: The network device the PHY is on.
- * @phy_addr: Address of PHY (ignored as we only have one)
- * @reg: The register to read.
- *
- * This call reads data from the PHY register specified in @reg. Since the
- * device does not support all the MII registers, the non-existent values
- * are always returned as zero.
- *
- * We return zero for unsupported registers as the MII code does not check
- * the value returned for any error status, and simply returns it to the
- * caller. The mii-tool that the driver was tested with takes any -ve error
- * as real PHY capabilities, thus displaying incorrect data to the user.
- */
-static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int ksreg;
- int result;
-
- ksreg = ks_phy_reg(reg);
- if (!ksreg)
- return 0x0; /* no error return allowed, so use zero */
-
- mutex_lock(&ks->lock);
- result = ks_rdreg16(ks, ksreg);
- mutex_unlock(&ks->lock);
-
- return result;
-}
-
-static void ks_phy_write(struct net_device *netdev,
- int phy, int reg, int value)
-{
- struct ks_net *ks = netdev_priv(netdev);
- int ksreg;
-
- ksreg = ks_phy_reg(reg);
- if (ksreg) {
- mutex_lock(&ks->lock);
- ks_wrreg16(ks, ksreg, value);
- mutex_unlock(&ks->lock);
- }
-}
-
-/**
- * ks_read_selftest - read the selftest memory info.
- * @ks: The device state
- *
- * Read and check the TX/RX memory selftest information.
- */
-static int ks_read_selftest(struct ks_net *ks)
-{
- unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
- int ret = 0;
- unsigned rd;
-
- rd = ks_rdreg16(ks, KS_MBIR);
-
- if ((rd & both_done) != both_done) {
- netdev_warn(ks->netdev, "Memory selftest not finished\n");
- return 0;
- }
-
- if (rd & MBIR_TXMBFA) {
- netdev_err(ks->netdev, "TX memory selftest fails\n");
- ret |= 1;
- }
-
- if (rd & MBIR_RXMBFA) {
- netdev_err(ks->netdev, "RX memory selftest fails\n");
- ret |= 2;
- }
-
- netdev_info(ks->netdev, "the selftest passes\n");
- return ret;
-}
-
-static void ks_setup(struct ks_net *ks)
-{
- u16 w;
-
- /**
- * Configure QMU Transmit
- */
-
- /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
- ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
-
- /* Setup Receive Frame Data Pointer Auto-Increment */
- ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
-
- /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
- ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
-
- /* Setup RxQ Command Control (RXQCR) */
- ks->rc_rxqcr = RXQCR_CMD_CNTL;
- ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
-
- /**
- * set the forced mode to half duplex; the default is full duplex,
- * because if auto-negotiation fails, most switches fall back to
- * half-duplex.
- */
-
- w = ks_rdreg16(ks, KS_P1MBCR);
- w &= ~BMCR_FULLDPLX;
- ks_wrreg16(ks, KS_P1MBCR, w);
-
- w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
- ks_wrreg16(ks, KS_TXCR, w);
-
- w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
-
- if (ks->promiscuous) /* bPromiscuous */
- w |= (RXCR1_RXAE | RXCR1_RXINVF);
- else if (ks->all_mcast) /* Multicast address passed mode */
- w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
- else /* Normal mode */
- w |= RXCR1_RXPAFMA;
-
- ks_wrreg16(ks, KS_RXCR1, w);
-} /*ks_setup */
-
-
-static void ks_setup_int(struct ks_net *ks)
-{
- ks->rc_ier = 0x00;
- /* Clear the interrupts status of the hardware. */
- ks_wrreg16(ks, KS_ISR, 0xffff);
-
- /* Enables the interrupts of the hardware. */
- ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
-} /* ks_setup_int */
-
-static int ks_hw_init(struct ks_net *ks)
-{
-#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
- ks->promiscuous = 0;
- ks->all_mcast = 0;
- ks->mcast_lst_size = 0;
-
- ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
- GFP_KERNEL);
- if (!ks->frame_head_info)
- return false;
-
- ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
- return true;
-}
-
-#if defined(CONFIG_OF)
-static const struct of_device_id ks8851_ml_dt_ids[] = {
- { .compatible = "micrel,ks8851-mll" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
-#endif
-
-static int ks8851_probe(struct platform_device *pdev)
-{
- int err;
- struct net_device *netdev;
- struct ks_net *ks;
- u16 id, data;
- const char *mac;
-
- netdev = alloc_etherdev(sizeof(struct ks_net));
- if (!netdev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- ks = netdev_priv(netdev);
- ks->netdev = netdev;
-
- ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ks->hw_addr)) {
- err = PTR_ERR(ks->hw_addr);
- goto err_free;
- }
-
- ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(ks->hw_addr_cmd)) {
- err = PTR_ERR(ks->hw_addr_cmd);
- goto err_free;
- }
-
- err = ks_check_endian(ks);
- if (err)
- goto err_free;
-
- netdev->irq = platform_get_irq(pdev, 0);
-
- if ((int)netdev->irq < 0) {
- err = netdev->irq;
- goto err_free;
- }
-
- ks->pdev = pdev;
-
- mutex_init(&ks->lock);
- spin_lock_init(&ks->statelock);
-
- netdev->netdev_ops = &ks_netdev_ops;
- netdev->ethtool_ops = &ks_ethtool_ops;
-
- /* setup mii state */
- ks->mii.dev = netdev;
- ks->mii.phy_id = 1,
- ks->mii.phy_id_mask = 1;
- ks->mii.reg_num_mask = 0xf;
- ks->mii.mdio_read = ks_phy_read;
- ks->mii.mdio_write = ks_phy_write;
-
- netdev_info(netdev, "message enable is %d\n", msg_enable);
- /* set the default message enable */
- ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
- NETIF_MSG_PROBE |
- NETIF_MSG_LINK));
- ks_read_config(ks);
-
- /* simple check for a valid chip being connected to the bus */
- if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- netdev_err(netdev, "failed to read device ID\n");
- err = -ENODEV;
- goto err_free;
- }
-
- if (ks_read_selftest(ks)) {
- netdev_err(netdev, "failed memory selftest\n");
- err = -ENODEV;
- goto err_free;
- }
-
- err = register_netdev(netdev);
- if (err)
- goto err_free;
-
- platform_set_drvdata(pdev, netdev);
-
- ks_soft_reset(ks, GRR_GSR);
- ks_hw_init(ks);
- ks_disable_qmu(ks);
- ks_setup(ks);
- ks_setup_int(ks);
-
- data = ks_rdreg16(ks, KS_OBCR);
- ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
-
- /* overwriting the default MAC address */
- if (pdev->dev.of_node) {
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(ks->mac_addr, mac);
- } else {
- struct ks8851_mll_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- netdev_err(netdev, "No platform data\n");
- err = -ENODEV;
- goto err_pdata;
- }
- memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
- }
- if (!is_valid_ether_addr(ks->mac_addr)) {
- /* Use random MAC address if none passed */
- eth_random_addr(ks->mac_addr);
- netdev_info(netdev, "Using random mac address\n");
- }
- netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
-
- memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
-
- ks_set_mac(ks, netdev->dev_addr);
-
- id = ks_rdreg16(ks, KS_CIDER);
-
- netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
- return 0;
-
-err_pdata:
- unregister_netdev(netdev);
-err_free:
- free_netdev(netdev);
- return err;
-}
-
-static int ks8851_remove(struct platform_device *pdev)
-{
- struct net_device *netdev = platform_get_drvdata(pdev);
-
- unregister_netdev(netdev);
- free_netdev(netdev);
- return 0;
-
-}
-
-static struct platform_driver ks8851_platform_driver = {
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(ks8851_ml_dt_ids),
- },
- .probe = ks8851_probe,
- .remove = ks8851_remove,
-};
-
-module_platform_driver(ks8851_platform_driver);
-
-MODULE_DESCRIPTION("KS8851 MLL Network driver");
-MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
-MODULE_LICENSE("GPL");
-module_param_named(message, msg_enable, int, 0);
-MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
-
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
new file mode 100644
index 000000000000..3bab0cb2b1a5
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* drivers/net/ethernet/micrel/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ * http://www.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+
+#include "ks8851.h"
+
+static int msg_enable;
+
+#define BE3 0x8000 /* Byte Enable 3 */
+#define BE2 0x4000 /* Byte Enable 2 */
+#define BE1 0x2000 /* Byte Enable 1 */
+#define BE0 0x1000 /* Byte Enable 0 */
+
+/**
+ * struct ks8851_net_par - KS8851 Parallel driver private data
+ * @ks8851: KS8851 driver common private data
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @hw_addr : start address of data register.
+ * @hw_addr_cmd : start address of command register.
+ * @cmd_reg_cache : command register cached.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished
+ * and the DMA has been de-asserted.
+ */
+struct ks8851_net_par {
+ struct ks8851_net ks8851;
+ spinlock_t lock;
+ void __iomem *hw_addr;
+ void __iomem *hw_addr_cmd;
+ u16 cmd_reg_cache;
+};
+
+#define to_ks8851_par(ks) container_of((ks), struct ks8851_net_par, ks8851)
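to_ks8851_par() is the usual embedded-struct pattern: the common struct ks8851_net sits first inside the bus wrapper, and container_of() walks back from the common pointer (the one netdev_priv() hands out) to the wrapper. A userspace stand-in, with container_of() redefined locally and the structures pared down to sketches:

#include <stddef.h>
#include <stdio.h>

/* pared-down stand-ins for the driver structures */
struct ks8851_net { int msg_enable; };

struct ks8851_net_par {
	struct ks8851_net ks8851;	/* common state, embedded first */
	int bus_private;
};

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define to_ks8851_par(ks) container_of((ks), struct ks8851_net_par, ks8851)

int main(void)
{
	struct ks8851_net_par ksp = { .bus_private = 42 };
	struct ks8851_net *ks = &ksp.ks8851;	/* what the core hands back */

	printf("%d\n", to_ks8851_par(ks)->bus_private);	/* prints 42 */
	return 0;
}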
+
+/**
+ * ks8851_lock_par - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Claim chip register access lock
+ */
+static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ spin_lock_irqsave(&ksp->lock, *flags);
+}
+
+/**
+ * ks8851_unlock_par - register access unlock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Release chip register access lock
+ */
+static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ spin_unlock_irqrestore(&ksp->lock, *flags);
+}
+
+/**
+ * ks_check_endian - Check whether endianness of the bus is correct
+ * @ks : The chip information
+ *
+ * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
+ * bus. To maintain optimum performance, the bus endianness should be set
+ * such that it matches the endianness of the CPU.
+ */
+static int ks_check_endian(struct ks8851_net *ks)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+ u16 cider;
+
+ /*
+ * Read CIDER register first, however read it the "wrong" way around.
+ * If the endian strap on the KS8851-16MLL is incorrect and the chip
+ * is operating in different endianness than the CPU, then the meaning
+ * of BE[3:0] byte-enable bits is also swapped such that:
+ * BE[3,2,1,0] becomes BE[1,0,3,2]
+ *
+ * Luckily for us, the byte-enable bits are the top four MSbits of
+ * the address register and the CIDER register is at offset 0xc0.
+ * Hence, by reading address 0xc0c0, which is not impacted by endian
+ * swapping, we assert either BE[3:2] or BE[1:0] while reading the
+ * CIDER register.
+ *
+ * If the bus configuration is correct, reading 0xc0c0 asserts
+ * BE[3:2] and this read returns 0x0000, because to read register
+ * with bottom two LSbits of address set to 0, BE[1:0] must be
+ * asserted.
+ *
+ * If the bus configuration is NOT correct, reading 0xc0c0 asserts
+ * BE[1:0] and this read returns non-zero 0x8872 value.
+ */
+ iowrite16(BE3 | BE2 | KS_CIDER, ksp->hw_addr_cmd);
+ cider = ioread16(ksp->hw_addr);
+ if (!cider)
+ return 0;
+
+ netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
+
+ return -EINVAL;
+}
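The magic of the probe address is that it is a byte-swap palindrome: with KS_CIDER at offset 0xc0 (as the comment above notes) and the BE3/BE2 values defined earlier in this file, BE3 | BE2 | KS_CIDER comes to 0xc0c0, which reads the same whichever way the bus swaps bytes. A quick standalone check:

#include <stdint.h>
#include <assert.h>

static uint16_t swab16(uint16_t x)
{
	return (x << 8) | (x >> 8);
}

int main(void)
{
	/* BE3 | BE2 | KS_CIDER = 0x8000 | 0x4000 | 0x00c0 */
	uint16_t cmd = 0x8000 | 0x4000 | 0x00c0;

	assert(cmd == 0xc0c0);
	assert(swab16(cmd) == cmd);	/* unaffected by an endian swap */
	return 0;
}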
+
+/**
+ * ks8851_wrreg16_par - write 16bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16_par(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02));
+ iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd);
+ iowrite16(val, ksp->hw_addr);
+}
+
+/**
+ * ks8851_rdreg16_par - read 16 bit register from chip
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+ */
+static unsigned int ks8851_rdreg16_par(struct ks8851_net *ks, unsigned int reg)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ ksp->cmd_reg_cache = (u16)reg | ((BE1 | BE0) << (reg & 0x02));
+ iowrite16(ksp->cmd_reg_cache, ksp->hw_addr_cmd);
+ return ioread16(ksp->hw_addr);
+}
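The "(BE1 | BE0) << (reg & 0x02)" expression shared by ks8851_wrreg16_par() and ks8851_rdreg16_par() derives the byte lanes from the register offset: registers with bit 1 of the offset clear use the low lanes, and setting bit 1 shifts the mask up to BE3 | BE2. A standalone sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

#define BE3 0x8000	/* Byte Enable 3 */
#define BE2 0x4000	/* Byte Enable 2 */
#define BE1 0x2000	/* Byte Enable 1 */
#define BE0 0x1000	/* Byte Enable 0 */

/* mirrors the command-word construction in the two accessors above */
static uint16_t ks_cmd_word(uint16_t reg)
{
	return reg | ((BE1 | BE0) << (reg & 0x02));
}

int main(void)
{
	printf("0x%04x\n", ks_cmd_word(0x30));	/* 0x3030: BE1|BE0 | 0x30 */
	printf("0x%04x\n", ks_cmd_word(0x32));	/* 0xc032: BE3|BE2 | 0x32 */
	return 0;
}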
+
+/**
+ * ks8851_rdfifo_par - read data from the receive fifo
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo_par(struct ks8851_net *ks, u8 *buff, unsigned int len)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
+
+ ioread16_rep(ksp->hw_addr, (u16 *)buff + 1, len / 2);
+}
+
+/**
+ * ks8851_wrfifo_par - write packet to TX FIFO
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
+ bool irq)
+{
+ struct ks8851_net_par *ksp = to_ks8851_par(ks);
+ unsigned int len = ALIGN(txp->len, 4);
+ unsigned int fid = 0;
+
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
+
+ fid = ks->fid++;
+ fid &= TXFR_TXFID_MASK;
+
+ if (irq)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ iowrite16(fid, ksp->hw_addr);
+ iowrite16(txp->len, ksp->hw_addr);
+
+ iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
+}
+
+/**
+ * ks8851_rx_skb_par - receive skbuff
+ * @ks: The device state.
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ netif_rx(skb);
+}
+
+static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
+{
+ return ks8851_rdreg16_par(ks, KS_TXQCR);
+}
+
+/**
+ * ks8851_start_xmit_par - transmit packet
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Unlike the SPI
+ * variant, the parallel bus driver writes the packet into the TX FIFO
+ * directly while holding the register access lock, then polls until the
+ * enqueue command has completed, so no deferred work queue is needed.
+ */
+static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+ unsigned long flags;
+ unsigned int txqcr;
+ u16 txmir;
+ int err;
+
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ ks8851_lock_par(ks, &flags);
+
+ txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff;
+
+ if (likely(txmir >= skb->len + 12)) {
+ ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_par(ks, skb, false);
+ ks8851_wrreg16_par(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16_par(ks, KS_TXQCR, TXQCR_METFE);
+
+ err = readx_poll_timeout_atomic(ks8851_rdreg16_par_txqcr, ks,
+ txqcr, !(txqcr & TXQCR_METFE),
+ 5, 1000000);
+ if (err)
+ ret = NETDEV_TX_BUSY;
+
+ ks8851_done_tx(ks, skb);
+ } else {
+ ret = NETDEV_TX_BUSY;
+ }
+
+ ks8851_unlock_par(ks, &flags);
+
+ return ret;
+}
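The poll above relies on readx_poll_timeout_atomic() from <linux/iopoll.h>, which keeps evaluating txqcr = ks8851_rdreg16_par_txqcr(ks) until the condition becomes true or the 1 s budget (sampled every 5 us) runs out, returning 0 on success or -ETIMEDOUT. A rough model of the macro's shape, with the real ktime/udelay bookkeeping reduced to a simple retry budget:

#include <errno.h>

/* rough model only: the kernel macro also delays between reads and
 * performs one final read after the deadline before giving up
 */
#define poll_timeout_model(op, addr, val, cond, max_tries)	\
({								\
	int __tries = (max_tries), __ret = -ETIMEDOUT;		\
	while (__tries--) {					\
		(val) = op(addr);				\
		if (cond) {					\
			__ret = 0;				\
			break;					\
		}						\
	}							\
	__ret;							\
})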
+
+static int ks8851_probe_par(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ks8851_net_par *ksp;
+ struct net_device *netdev;
+ struct ks8851_net *ks;
+ int ret;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_par));
+ if (!netdev)
+ return -ENOMEM;
+
+ ks = netdev_priv(netdev);
+
+ ks->lock = ks8851_lock_par;
+ ks->unlock = ks8851_unlock_par;
+ ks->rdreg16 = ks8851_rdreg16_par;
+ ks->wrreg16 = ks8851_wrreg16_par;
+ ks->rdfifo = ks8851_rdfifo_par;
+ ks->wrfifo = ks8851_wrfifo_par;
+ ks->start_xmit = ks8851_start_xmit_par;
+ ks->rx_skb = ks8851_rx_skb_par;
+
+#define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_RXI | /* RX done */ \
+ IRQ_RXPSI) /* RX process stop */
+ ks->rc_ier = STD_IRQ;
+
+ ksp = to_ks8851_par(ks);
+ spin_lock_init(&ksp->lock);
+
+ ksp->hw_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ksp->hw_addr))
+ return PTR_ERR(ksp->hw_addr);
+
+ ksp->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ksp->hw_addr_cmd))
+ return PTR_ERR(ksp->hw_addr_cmd);
+
+ ret = ks_check_endian(ks);
+ if (ret)
+ return ret;
+
+ netdev->irq = platform_get_irq(pdev, 0);
+
+ return ks8851_probe_common(netdev, dev, msg_enable);
+}
+
+static int ks8851_remove_par(struct platform_device *pdev)
+{
+ return ks8851_remove_common(&pdev->dev);
+}
+
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851-mll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
+
+static struct platform_driver ks8851_driver = {
+ .driver = {
+ .name = "ks8851",
+ .of_match_table = ks8851_match_table,
+ .pm = &ks8851_pm_ops,
+ },
+ .probe = ks8851_probe_par,
+ .remove = ks8851_remove_par,
+};
+module_platform_driver(ks8851_driver);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
new file mode 100644
index 000000000000..4ec7f1615977
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* drivers/net/ethernet/micrel/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ * http://www.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "ks8851.h"
+
+static int msg_enable;
+
+/**
+ * struct ks8851_net_spi - KS8851 SPI driver private data
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @tx_work: Work queue for tx packets
+ * @ks8851: KS8851 driver common private data
+ * @spidev: The spi device we're bound to.
+ * @spi_msg1: pre-setup SPI message with one transfer, @spi_xfer1.
+ * @spi_msg2: pre-setup SPI message with two transfers, @spi_xfer2.
+ * @spi_xfer1: @spi_msg1 SPI transfer structure
+ * @spi_xfer2: @spi_msg2 SPI transfer structure
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ */
+struct ks8851_net_spi {
+ struct ks8851_net ks8851;
+ struct mutex lock;
+ struct work_struct tx_work;
+ struct spi_device *spidev;
+ struct spi_message spi_msg1;
+ struct spi_message spi_msg2;
+ struct spi_transfer spi_xfer1;
+ struct spi_transfer spi_xfer2[2];
+};
+
+#define to_ks8851_spi(ks) container_of((ks), struct ks8851_net_spi, ks8851)
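The one-transfer and two-transfer messages are strung together once, mirroring the spi_message_init()/spi_message_add_tail() calls deleted from the old common probe above, so the hot paths only have to patch buffer pointers and lengths. A sketch of that wiring against this structure, presumably done in the SPI probe:

/* sketch: pre-cook the messages so rdreg/wrreg/fifo paths reuse them */
spi_message_init(&kss->spi_msg1);
spi_message_add_tail(&kss->spi_xfer1, &kss->spi_msg1);

spi_message_init(&kss->spi_msg2);
spi_message_add_tail(&kss->spi_xfer2[0], &kss->spi_msg2);
spi_message_add_tail(&kss->spi_xfer2[1], &kss->spi_msg2);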
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD 0x00
+#define KS_SPIOP_WR 0x40
+#define KS_SPIOP_RXFIFO 0x80
+#define KS_SPIOP_TXFIFO 0xC0
+
+/* shift for byte-enable data */
+#define BYTE_EN(_x) ((_x) << 2)
+
+/* turn register number and byte-enable mask into data for start of packet */
+#define MK_OP(_byteen, _reg) \
+ (BYTE_EN(_byteen) | (_reg) << (8 + 2) | (_reg) >> 6)
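A worked example of the framing may help (a sketch, assuming the chip's MSB-first command layout of 2 opcode bits, 4 byte-enable bits and 8 register-address bits): MK_OP() deliberately produces the byte-swapped value, so that storing it with cpu_to_le16(), as the callers below do, puts the two command bytes on the wire in the right order from a little-endian CPU:

#include <stdint.h>
#include <stdio.h>

#define KS_SPIOP_RD	0x00
#define BYTE_EN(_x)	((_x) << 2)
#define MK_OP(_byteen, _reg) \
	(BYTE_EN(_byteen) | (_reg) << (8 + 2) | (_reg) >> 6)

int main(void)
{
	/* 16-bit read of register 0x30 with the low byte lanes enabled,
	 * truncated to 16 bits as cpu_to_le16() would do
	 */
	uint16_t cmd = (uint16_t)(MK_OP(0x3, 0x30) | KS_SPIOP_RD);

	/* stored little-endian and clocked out byte by byte, the wire sees
	 * 0x0c 0xc0: opcode 00, byte enables 0011, address bits 9:2 = 0x30
	 */
	printf("%02x %02x\n", cmd & 0xff, cmd >> 8);
	return 0;
}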
+
+/**
+ * ks8851_lock_spi - register access lock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Claim chip register access lock
+ */
+static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ mutex_lock(&kss->lock);
+}
+
+/**
+ * ks8851_unlock_spi - register access unlock
+ * @ks: The chip state
+ * @flags: Spinlock flags
+ *
+ * Release chip register access lock
+ */
+static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ mutex_unlock(&kss->lock);
+}
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks8851_wrreg16_spi - write 16bit register value to chip via SPI
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16_spi(struct ks8851_net *ks, unsigned int reg,
+ unsigned int val)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = &kss->spi_xfer1;
+ struct spi_message *msg = &kss->spi_msg1;
+ __le16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
+ txb[1] = cpu_to_le16(val);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 4;
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "spi_sync() failed\n");
+}
+
+/**
+ * ks8851_rdreg - issue read register command and return the data
+ * @ks: The device state
+ * @op: The register address and byte enables in message format.
+ * @rxb: The RX buffer to return the result into
+ * @rxl: The length of data expected.
+ *
+ * This is the low level read call that issues the necessary spi message(s)
+ * to read data from the register specified in @op.
+ */
+static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
+ u8 *rxb, unsigned int rxl)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
+ __le16 *txb = (__le16 *)ks->txd;
+ u8 *trx = ks->rxd;
+ int ret;
+
+ txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
+
+ if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ msg = &kss->spi_msg2;
+ xfer = kss->spi_xfer2;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 2;
+
+ xfer++;
+ xfer->tx_buf = NULL;
+ xfer->rx_buf = trx;
+ xfer->len = rxl;
+ } else {
+ msg = &kss->spi_msg1;
+ xfer = &kss->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
+ }
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
+ else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
+ memcpy(rxb, trx, rxl);
+ else
+ memcpy(rxb, trx + 2, rxl);
+}
+
+/**
+ * ks8851_rdreg16_spi - read 16 bit register from device via SPI
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+ */
+static unsigned int ks8851_rdreg16_spi(struct ks8851_net *ks, unsigned int reg)
+{
+ __le16 rx = 0;
+
+ ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
+ return le16_to_cpu(rx);
+}
+
+/**
+ * ks8851_rdfifo_spi - read data from the receive fifo via SPI
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo_spi(struct ks8851_net *ks, u8 *buff, unsigned int len)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = kss->spi_xfer2;
+ struct spi_message *msg = &kss->spi_msg2;
+ u8 txb[1];
+ int ret;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
+
+ /* set the operation we're issuing */
+ txb[0] = KS_SPIOP_RXFIFO;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 1;
+
+ xfer++;
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_wrfifo_spi - write packet to TX FIFO via SPI
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
+ bool irq)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+ struct spi_transfer *xfer = kss->spi_xfer2;
+ struct spi_message *msg = &kss->spi_msg2;
+ unsigned int fid = 0;
+ int ret;
+
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
+
+ fid = ks->fid++;
+ fid &= TXFR_TXFID_MASK;
+
+ if (irq)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ /* start header at txb[1] to align txw entries */
+ ks->txh.txb[1] = KS_SPIOP_TXFIFO;
+ ks->txh.txw[1] = cpu_to_le16(fid);
+ ks->txh.txw[2] = cpu_to_le16(txp->len);
+
+ xfer->tx_buf = &ks->txh.txb[1];
+ xfer->rx_buf = NULL;
+ xfer->len = 5;
+
+ xfer++;
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = ALIGN(txp->len, 4);
+
+ ret = spi_sync(kss->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
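
A dry run of the header assembled above, for an illustrative 60-byte frame with frame ID 5 and IRQ-on-completion requested:

/* ks->txh.txb[1] = KS_SPIOP_TXFIFO             1 command byte
 * ks->txh.txw[1] = cpu_to_le16(5 | TXFR_TXIC)  frame ID + control
 * ks->txh.txw[2] = cpu_to_le16(60)             frame length
 *
 * first transfer:  5 bytes starting at txb[1] (starting at byte 1
 *                  keeps the two 16-bit txw words naturally aligned);
 * second transfer: ALIGN(60, 4) = 60 bytes of frame data.
 */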
+
+/**
+ * ks8851_rx_skb_spi - receive skbuff
+ * @ks: The device state
+ * @skb: The skbuff
+ */
+static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
+{
+ netif_rx_ni(skb);
+}
+
+/**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work structure that was scheduled.
+ *
+ * This is called when a number of packets have been scheduled for
+ * transmission and need to be sent to the device.
+ */
+static void ks8851_tx_work(struct work_struct *work)
+{
+ struct ks8851_net_spi *kss;
+ struct ks8851_net *ks;
+ unsigned long flags;
+ struct sk_buff *txb;
+ bool last;
+
+ kss = container_of(work, struct ks8851_net_spi, tx_work);
+ ks = &kss->ks8851;
+ last = skb_queue_empty(&ks->txq);
+
+ ks8851_lock_spi(ks, &flags);
+
+ while (!last) {
+ txb = skb_dequeue(&ks->txq);
+ last = skb_queue_empty(&ks->txq);
+
+ if (txb) {
+ ks8851_wrreg16_spi(ks, KS_RXQCR,
+ ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrfifo_spi(ks, txb, last);
+ ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16_spi(ks, KS_TXQCR, TXQCR_METFE);
+
+ ks8851_done_tx(ks, txb);
+ }
+ }
+
+ ks8851_unlock_spi(ks, &flags);
+}
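
Note how the irq argument to ks8851_wrfifo_spi() behaves here: last is re-sampled after every dequeue, so only the final frame of a burst requests a TX-done interrupt. An illustrative trace, assuming frames A, B and C are queued and nothing new arrives mid-loop:

/* iteration 1: dequeue A, queue = {B, C} -> last = false, no TXFR_TXIC
 * iteration 2: dequeue B, queue = {C}    -> last = false, no TXFR_TXIC
 * iteration 3: dequeue C, queue = {}     -> last = true,  TXFR_TXIC set
 *
 * so a whole burst of packets completes with a single TX interrupt.
 */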
+
+/**
+ * ks8851_flush_tx_work_spi - flush outstanding TX work
+ * @ks: The device state
+ */
+static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
+{
+ struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+
+ flush_work(&kss->tx_work);
+}
+
+/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static unsigned int calc_txlen(unsigned int len)
+{
+ return ALIGN(len + 4, 4);
+}
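
A worked example of the arithmetic above, on the reading (consistent with ks8851_wrfifo_spi()) that the extra 4 bytes cover the fid/control and length words, while the KS_SPIOP_TXFIFO command byte is SPI framing that never enters the FIFO:

/* calc_txlen(60) = ALIGN(64, 4) = 64  (data already a multiple of 4)
 * calc_txlen(61) = ALIGN(65, 4) = 68  (4-byte header + 64 padded bytes)
 */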
+
+/**
+ * ks8851_start_xmit_spi - transmit packet using SPI
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this firstly to avoid sleeping with the network device locked,
+ * and secondly so that we can batch up more than one packet to transmit,
+ * which means we can avoid generating too many transmit-done interrupts.
+ */
+static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned int needed = calc_txlen(skb->len);
+ struct ks8851_net *ks = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+ struct ks8851_net_spi *kss;
+
+ kss = to_ks8851_spi(ks);
+
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ spin_lock(&ks->statelock);
+
+ if (needed > ks->tx_space) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ ks->tx_space -= needed;
+ skb_queue_tail(&ks->txq, skb);
+ }
+
+ spin_unlock(&ks->statelock);
+ schedule_work(&kss->tx_work);
+
+ return ret;
+}
+
+static int ks8851_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ks8851_net_spi *kss;
+ struct net_device *netdev;
+ struct ks8851_net *ks;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct ks8851_net_spi));
+ if (!netdev)
+ return -ENOMEM;
+
+ spi->bits_per_word = 8;
+
+ ks = netdev_priv(netdev);
+
+ ks->lock = ks8851_lock_spi;
+ ks->unlock = ks8851_unlock_spi;
+ ks->rdreg16 = ks8851_rdreg16_spi;
+ ks->wrreg16 = ks8851_wrreg16_spi;
+ ks->rdfifo = ks8851_rdfifo_spi;
+ ks->wrfifo = ks8851_wrfifo_spi;
+ ks->start_xmit = ks8851_start_xmit_spi;
+ ks->rx_skb = ks8851_rx_skb_spi;
+ ks->flush_tx_work = ks8851_flush_tx_work_spi;
+
+#define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_TXI | /* TX done */ \
+ IRQ_RXI | /* RX done */ \
+ IRQ_SPIBEI | /* SPI bus error */ \
+ IRQ_TXPSI | /* TX process stop */ \
+ IRQ_RXPSI) /* RX process stop */
+ ks->rc_ier = STD_IRQ;
+
+ kss = to_ks8851_spi(ks);
+
+ kss->spidev = spi;
+ mutex_init(&kss->lock);
+ INIT_WORK(&kss->tx_work, ks8851_tx_work);
+
+ /* initialise pre-made spi transfer messages */
+ spi_message_init(&kss->spi_msg1);
+ spi_message_add_tail(&kss->spi_xfer1, &kss->spi_msg1);
+
+ spi_message_init(&kss->spi_msg2);
+ spi_message_add_tail(&kss->spi_xfer2[0], &kss->spi_msg2);
+ spi_message_add_tail(&kss->spi_xfer2[1], &kss->spi_msg2);
+
+ netdev->irq = spi->irq;
+
+ return ks8851_probe_common(netdev, dev, msg_enable);
+}
+
+static int ks8851_remove_spi(struct spi_device *spi)
+{
+ return ks8851_remove_common(&spi->dev);
+}
+
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
+
+static struct spi_driver ks8851_driver = {
+ .driver = {
+ .name = "ks8851",
+ .of_match_table = ks8851_match_table,
+ .pm = &ks8851_pm_ops,
+ },
+ .probe = ks8851_probe_spi,
+ .remove = ks8851_remove_spi,
+};
+module_spi_driver(ks8851_driver);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 1f496fac7033..5bd7fb917b7a 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -17,11 +17,6 @@
#include "encx24j600_hw.h"
-static inline bool is_bits_set(int value, int mask)
-{
- return (value & mask) == mask;
-}
-
static int encx24j600_switch_bank(struct encx24j600_context *ctx,
int bank)
{
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index b25a13da900a..2c0dcd7acf3f 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -604,9 +604,8 @@ static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
}
}
-static int encx24j600_hw_init(struct encx24j600_priv *priv)
+static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
- int ret = 0;
u16 macon2;
priv->hw_enabled = false;
@@ -649,8 +648,6 @@ static int encx24j600_hw_init(struct encx24j600_priv *priv)
if (netif_msg_hw(priv))
encx24j600_dump_config(priv, "Hw is initialized");
-
- return ret;
}
static void encx24j600_hw_enable(struct encx24j600_priv *priv)
@@ -1042,12 +1039,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Initialize the device HW to the consistent state */
- if (encx24j600_hw_init(priv)) {
- netif_err(priv, probe, ndev,
- DRV_NAME ": HW initialization error\n");
- ret = -EIO;
- goto out_free;
- }
+ encx24j600_hw_init(priv);
kthread_init_worker(&priv->kworker);
kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 3a0b289d9771..c533d06fbe3a 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -2,11 +2,11 @@
/* Copyright (C) 2018 Microchip Technology Inc. */
#include <linux/netdevice.h>
-#include "lan743x_main.h"
-#include "lan743x_ethtool.h"
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include <linux/phy.h>
+#include "lan743x_main.h"
+#include "lan743x_ethtool.h"
/* eeprom */
#define LAN743X_EEPROM_MAGIC (0x74A5)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a43140f7b5eb..36624e3c633b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -8,7 +8,10 @@
#include <linux/crc32.h>
#include <linux/microchipphy.h>
#include <linux/net_tstamp.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
@@ -798,9 +801,9 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
netdev = adapter->netdev;
- /* setup auto duplex, and speed detection */
+ /* disable auto duplex, and speed detection. Phylib does that */
data = lan743x_csr_read(adapter, MAC_CR);
- data |= MAC_CR_ADD_ | MAC_CR_ASD_;
+ data &= ~(MAC_CR_ADD_ | MAC_CR_ASD_);
data |= MAC_CR_CNTR_RST_;
lan743x_csr_write(adapter, MAC_CR, data);
@@ -946,6 +949,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
+ u32 data;
phy_print_status(phydev);
if (phydev->state == PHY_RUNNING) {
@@ -953,6 +957,39 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
int remote_advertisement = 0;
int local_advertisement = 0;
+ data = lan743x_csr_read(adapter, MAC_CR);
+
+ /* set interface mode */
+ if (phy_interface_mode_is_rgmii(adapter->phy_mode))
+ /* RGMII */
+ data &= ~MAC_CR_MII_EN_;
+ else
+ /* GMII */
+ data |= MAC_CR_MII_EN_;
+
+ /* set duplex mode */
+ if (phydev->duplex)
+ data |= MAC_CR_DPX_;
+ else
+ data &= ~MAC_CR_DPX_;
+
+ /* set bus speed */
+ switch (phydev->speed) {
+ case SPEED_10:
+ data &= ~MAC_CR_CFG_H_;
+ data &= ~MAC_CR_CFG_L_;
+ break;
+ case SPEED_100:
+ data &= ~MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ case SPEED_1000:
+ data |= MAC_CR_CFG_H_;
+ data |= MAC_CR_CFG_L_;
+ break;
+ }
+ lan743x_csr_write(adapter, MAC_CR, data);
+
memset(&ksettings, 0, sizeof(ksettings));
phy_ethtool_get_link_ksettings(netdev, &ksettings);
local_advertisement =
@@ -980,20 +1017,44 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct device_node *phynode;
struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev)
- goto return_error;
+ phynode = of_node_get(adapter->pdev->dev.of_node);
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ if (phynode) {
+ of_get_phy_mode(phynode, &adapter->phy_mode);
+
+ if (of_phy_is_fixed_link(phynode)) {
+ ret = of_phy_register_fixed_link(phynode);
+ if (ret) {
+ netdev_err(netdev,
+ "cannot register fixed PHY\n");
+ of_node_put(phynode);
+ goto return_error;
+ }
+ }
+ phydev = of_phy_connect(netdev, phynode,
+ lan743x_phy_link_status_change, 0,
+ adapter->phy_mode);
+ of_node_put(phynode);
+ if (!phydev)
+ goto return_error;
+ } else {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (!phydev)
+ goto return_error;
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
- if (ret)
- goto return_error;
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_mode);
+ if (ret)
+ goto return_error;
+ }
/* MAC doesn't support 1000T Half */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b02eeae5f45..c61a40411317 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -4,6 +4,7 @@
#ifndef _LAN743X_H
#define _LAN743X_H
+#include <linux/phy.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <Bryan.Whitehead@microchip.com>"
@@ -104,10 +105,14 @@
((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_)
#define MAC_CR (0x100)
+#define MAC_CR_MII_EN_ BIT(19)
#define MAC_CR_EEE_EN_ BIT(17)
#define MAC_CR_ADD_ BIT(12)
#define MAC_CR_ASD_ BIT(11)
#define MAC_CR_CNTR_RST_ BIT(5)
+#define MAC_CR_DPX_ BIT(3)
+#define MAC_CR_CFG_H_ BIT(2)
+#define MAC_CR_CFG_L_ BIT(1)
#define MAC_CR_RST_ BIT(0)
#define MAC_RX (0x104)
@@ -698,6 +703,7 @@ struct lan743x_rx {
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
+ phy_interface_t phy_mode;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 9399f6a98748..ab6d719d40f0 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -2,12 +2,12 @@
/* Copyright (C) 2018 Microchip Technology Inc. */
#include <linux/netdevice.h>
-#include "lan743x_main.h"
#include <linux/ptp_clock_kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
+#include "lan743x_main.h"
#include "lan743x_ptp.h"
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index f70bb81e1ed6..49fd843c4c8a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -331,14 +331,15 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void *desc;
unsigned int len;
unsigned int tx_head;
u32 txdes1;
- int ret = NETDEV_TX_BUSY;
+ netdev_tx_t ret = NETDEV_TX_BUSY;
spin_lock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 9a36c26095c8..91b33b55054e 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0 OR MIT)
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o
+mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index efb3965a3e42..9cfe1fd98c30 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -1205,18 +1204,19 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->chip_port;
- /* The function is only used for PTP operations for now */
- if (!ocelot->ptp)
- return -EOPNOTSUPP;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- default:
- return -EOPNOTSUPP;
+ /* If the attached PHY device isn't capable of timestamping operations,
+ * use our own (when possible).
+ */
+ if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(ocelot, port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(ocelot, port, ifr);
+ }
}
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static const struct net_device_ops ocelot_port_netdev_ops = {
@@ -1348,6 +1348,12 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
@@ -1996,200 +2002,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
-int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- time64_t s;
- u32 val;
- s64 ns;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
- s <<= 32;
- s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-
- /* Deal with negative values */
- if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
- s--;
- ns &= 0xf;
- ns += 999999984;
- }
-
- set_normalized_timespec64(ts, s, ns);
- return 0;
-}
-EXPORT_SYMBOL(ocelot_ptp_gettime64);
-
-static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
- TOD_ACC_PIN);
- ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
- ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
-
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
-
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- } else {
- /* Fall back using ocelot_ptp_settime64 which is not exact. */
- struct timespec64 ts;
- u64 now;
-
- ocelot_ptp_gettime64(ptp, &ts);
-
- now = ktime_to_ns(timespec64_to_ktime(ts));
- ts = ns_to_timespec64(now + delta);
-
- ocelot_ptp_settime64(ptp, &ts);
- }
- return 0;
-}
-
-static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
-{
- struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
- u32 unit = 0, direction = 0;
- unsigned long flags;
- u64 adj = 0;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- if (!scaled_ppm)
- goto disable_adj;
-
- if (scaled_ppm < 0) {
- direction = PTP_CFG_CLK_ADJ_CFG_DIR;
- scaled_ppm = -scaled_ppm;
- }
-
- adj = PSEC_PER_SEC << 16;
- do_div(adj, scaled_ppm);
- do_div(adj, 1000);
-
- /* If the adjustment value is too large, use ns instead */
- if (adj >= (1L << 30)) {
- unit = PTP_CFG_CLK_ADJ_FREQ_NS;
- do_div(adj, 1000);
- }
-
- /* Still too big */
- if (adj >= (1L << 30))
- goto disable_adj;
-
- ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
- ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
- PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-
-disable_adj:
- ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
- return 0;
-}
-
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "ocelot ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
-};
-
-static int ocelot_init_timestamp(struct ocelot *ocelot)
-{
- struct ptp_clock *ptp_clock;
-
- ocelot->ptp_info = ocelot_ptp_clock_info;
- ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
- if (IS_ERR(ptp_clock))
- return PTR_ERR(ptp_clock);
- /* Check if PHC support is missing at the configuration level */
- if (!ptp_clock)
- return 0;
-
- ocelot->ptp_clock = ptp_clock;
-
- ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
- ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
-
- ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
-
- /* There is no device reconfiguration, PTP Rx stamping is always
- * enabled.
- */
- ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-
- return 0;
-}
-
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2535,15 +2347,6 @@ int ocelot_init(struct ocelot *ocelot)
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
- if (ocelot->ptp) {
- ret = ocelot_init_timestamp(ocelot);
- if (ret) {
- dev_err(ocelot->dev,
- "Timestamp initialization failed\n");
- return ret;
- }
- }
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
@@ -2556,8 +2359,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- if (ocelot->ptp_clock)
- ptp_clock_unregister(ocelot->ptp_clock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 641af929497f..f0a15aa187f2 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -15,18 +15,17 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
-#include "ocelot_ptp.h"
#define OCELOT_BUFFER_CELL_SZ 60
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 3bd286044480..dfd82a3baab2 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -706,13 +706,124 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
+/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
+ * on destination and source MAC addresses, but only on higher-level protocol
+ * information. The only frame types to match on keys containing MAC addresses
+ * in this case are non-SNAP, non-ARP, non-IP and non-OAM frames.
+ *
+ * If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match
+ * on MAC_ETYPE keys such as destination and source MAC on this ingress port.
+ * However, the setting has the side effect that these frames no longer
+ * match on any keys _other_ than MAC_ETYPE ones.
+ */
+static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
+ bool on)
+{
+ u32 val = 0;
+
+ if (on)
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(3) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(3);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M,
+ ANA_PORT_VCAP_S2_CFG, port);
+}
+
+static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace)
+{
+ u16 proto, mask;
+
+ if (ace->type != OCELOT_ACE_TYPE_ETYPE)
+ return false;
+
+ proto = ntohs(*(u16 *)ace->frame.etype.etype.value);
+ mask = ntohs(*(u16 *)ace->frame.etype.etype.mask);
+
+ /* ETH_P_ALL match, so all protocols below are included */
+ if (mask == 0)
+ return true;
+ if (proto == ETH_P_ARP)
+ return true;
+ if (proto == ETH_P_IP)
+ return true;
+ if (proto == ETH_P_IPV6)
+ return true;
+
+ return false;
+}
+
+static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace)
+{
+ if (ace->type == OCELOT_ACE_TYPE_SNAP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_ARP)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV4)
+ return true;
+ if (ace->type == OCELOT_ACE_TYPE_IPV6)
+ return true;
+ return false;
+}
+
+static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot,
+ struct ocelot_ace_rule *ace)
+{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
+ struct ocelot_ace_rule *tmp;
+ unsigned long port;
+ int i;
+
+ if (ocelot_ace_is_problematic_mac_etype(ace)) {
+ /* Search for any non-MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_non_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, true);
+ } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) {
+ /* Search for any MAC_ETYPE rules on the port */
+ for (i = 0; i < block->count; i++) {
+ tmp = ocelot_ace_rule_get_rule_index(block, i);
+ if (tmp->ingress_port_mask & ace->ingress_port_mask &&
+ ocelot_ace_is_problematic_mac_etype(tmp))
+ return false;
+ }
+
+ for_each_set_bit(port, &ace->ingress_port_mask,
+ ocelot->num_phys_ports)
+ ocelot_match_all_as_mac_etype(ocelot, port, false);
+ }
+
+ return true;
+}
+
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack)
{
struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
+ if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
+ return -EBUSY;
+ }
+
/* Add rule to the linked list */
ocelot_ace_rule_add(ocelot, block, rule);
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index 29d22c566786..099e177f2617 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -194,7 +194,7 @@ struct ocelot_ace_rule {
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- u16 ingress_port_mask;
+ unsigned long ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -215,7 +215,8 @@ struct ocelot_ace_rule {
};
int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
- struct ocelot_ace_rule *rule);
+ struct ocelot_ace_rule *rule,
+ struct netlink_ext_ack *extack);
int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
struct ocelot_ace_rule *rule);
int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 0ac9fbf77a01..4a15d2ff8b70 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -189,7 +189,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
skb->offload_fwd_mark = 1;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
} while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
@@ -366,6 +367,23 @@ static const struct vcap_props vsc7514_vcap_props[] = {
},
};
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -469,6 +487,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->vcap = vsc7514_vcap_props;
ocelot_init(ocelot);
+ if (ocelot->ptp) {
+ err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ if (err) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ ocelot->ptp = 0;
+ }
+ }
+
/* No NPI port */
ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
OCELOT_TAG_PREFIX_NONE);
@@ -574,6 +601,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 341923311fec..5ce172e22b43 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -51,6 +51,8 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ u16 proto = ntohs(f->common.protocol);
+ bool match_protocol = true;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -71,7 +73,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
- u16 proto = ntohs(f->common.protocol);
/* The hw support mac matches only for MAC_ETYPE key,
* therefore if other matches(port, tcp flags, etc) are added
@@ -86,11 +87,6 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
BIT(FLOW_DISSECTOR_KEY_CONTROL)))
return -EOPNOTSUPP;
- if (proto == ETH_P_IP ||
- proto == ETH_P_IPV6 ||
- proto == ETH_P_ARP)
- return -EOPNOTSUPP;
-
flow_rule_match_eth_addrs(rule, &match);
ace->type = OCELOT_ACE_TYPE_ETYPE;
ether_addr_copy(ace->frame.etype.dmac.value,
@@ -114,6 +110,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
ace->type = OCELOT_ACE_TYPE_IPV6;
@@ -121,11 +118,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match.key->ip_proto;
ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
+ match_protocol = false;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IP) {
+ proto == ETH_P_IP) {
struct flow_match_ipv4_addrs match;
u8 *tmp;
@@ -141,10 +139,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
- ntohs(f->common.protocol) == ETH_P_IPV6) {
+ proto == ETH_P_IPV6) {
return -EOPNOTSUPP;
}
@@ -156,6 +155,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
ace->frame.ipv4.dport.value = ntohs(match.key->dst);
ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -167,9 +167,20 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
ace->vlan.vid.mask = match.mask->vlan_id;
ace->vlan.pcp.value[0] = match.key->vlan_priority;
ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
}
finished_key_parsing:
+ if (match_protocol && proto != ETH_P_ALL) {
+ /* TODO: support SNAP, LLC etc */
+ if (proto < ETH_P_802_3_MIN)
+ return -EOPNOTSUPP;
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ *(u16 *)ace->frame.etype.etype.value = htons(proto);
+ *(u16 *)ace->frame.etype.etype.mask = 0xffff;
+ }
+ /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */
+
ace->prio = f->common.prio;
ace->id = f->cookie;
return ocelot_flower_parse_action(f, ace);
@@ -205,7 +216,7 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
- return ocelot_ace_rule_offload_add(ocelot, ace);
+ return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
new file mode 100644
index 000000000000..a3088a1676ed
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot PTP clock driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2020 NXP
+ */
+#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
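
A worked example of the wraparound handling above: the hardware reports small negative nanosecond counts as values in the range 0x3ffffff0 to 0x3fffffff, so 0x3ffffff0 decodes as -16 ns:

/* ns = 0x3ffffff0:  ns & 0xf = 0,  and 0 + 999999984 = 10^9 - 16;
 * combined with s--, the result is (s - 1) s + (10^9 - 16) ns,
 * i.e. exactly 16 ns before the s-second boundary.
 */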
+
+int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_settime64);
+
+int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot,
+ ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
+ PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to using ocelot_ptp_settime64, which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjtime);
+
+int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_adjfine);
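
To make the fixed-point math above concrete (reading @adj as the interval between single-unit corrections is an assumption inferred from the unit selection, with PTP_CFG_CLK_ADJ_FREQ_NS switching it from picoseconds to nanoseconds):

/* scaled_ppm = 65536 requests 1 ppm (scaled_ppm is ppm * 2^16):
 *   adj = (10^12 << 16) / 65536 / 1000 = 10^9
 * which still fits the 30-bit limit, so picosecond units are kept.
 * Below roughly 1 ppm the picosecond value overflows 1 << 30 and the
 * code retries in nanoseconds before disabling adjustment entirely.
 */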
+
+int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_verify);
+
+int ocelot_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ struct timespec64 ts_start, ts_period;
+ enum ocelot_ptp_pins ptp_pin;
+ unsigned long flags;
+ bool pps = false;
+ int pin = -1;
+ u32 val;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin == 0)
+ ptp_pin = PTP_PIN_0;
+ else if (pin == 1)
+ ptp_pin = PTP_PIN_1;
+ else if (pin == 2)
+ ptp_pin = PTP_PIN_2;
+ else if (pin == 3)
+ ptp_pin = PTP_PIN_3;
+ else
+ return -EBUSY;
+
+ ts_start.tv_sec = rq->perout.start.sec;
+ ts_start.tv_nsec = rq->perout.start.nsec;
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+
+ if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
+ pps = true;
+
+ if (ts_start.tv_sec || (ts_start.tv_nsec && !pps)) {
+ dev_warn(ocelot->dev,
+ "Absolute start time not supported!\n");
+ dev_warn(ocelot->dev,
+ "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
+ return -EINVAL;
+ }
+
+ /* Handle turning off */
+ if (!on) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle PPS request */
+ if (pps) {
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ /* The pulse is generated perout.start.nsec after the TOD
+ * seconds counter increments.
+ * The pulse width is set to 1 us.
+ */
+ ocelot_write_rix(ocelot, ts_start.tv_nsec,
+ PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, 1000,
+ PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ val |= PTP_PIN_CFG_SYNC;
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ }
+
+ /* Handle periodic clock */
+ ns = timespec64_to_ns(&ts_period);
+ ns = ns >> 1;
+ if (ns > 0x3fffffff || ns <= 0x6)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_LOW_PERIOD, ptp_pin);
+ ocelot_write_rix(ocelot, ns, PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+ val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_enable);
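
For context, a minimal userspace sketch of how a 1 PPS request reaches ocelot_ptp_enable(); this is not part of the patch, and the device path and pin index are illustrative:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

static int request_pps(void)
{
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT };
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* the switch PHC */

	if (fd < 0)
		return -1;
	/* route pin 0 as a periodic output, then ask for a 1 s period */
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin) < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;	/* a 1 s period is treated as PPS */
	req.start.nsec = 0;	/* only a PPS phase offset is accepted */
	return ioctl(fd, PTP_PEROUT_REQUEST, &req);
}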
+
+int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+{
+ struct ptp_clock *ptp_clock;
+ int i;
+
+ ocelot->ptp_info = *info;
+
+ for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) {
+ struct ptp_pin_desc *p = &ocelot->ptp_pins[i];
+
+ snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i);
+ p->index = i;
+ p->func = PTP_PF_NONE;
+ }
+
+ ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0];
+
+ ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ptp_clock))
+ return PTR_ERR(ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ptp_clock)
+ return 0;
+
+ ocelot->ptp_clock = ptp_clock;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx stamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_init_timestamp);
+
+int ocelot_deinit_timestamp(struct ocelot *ocelot)
+{
+ if (ocelot->ptp_clock)
+ ptp_clock_unregister(ocelot->ptp_clock);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_deinit_timestamp);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
deleted file mode 100644
index 9ede14a12573..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_ptp.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * License: Dual MIT/GPL
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_PTP_H_
-#define _MSCC_OCELOT_PTP_H_
-
-#define PTP_PIN_CFG_RSZ 0x20
-#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
-#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
-
-#define PTP_PIN_CFG_DOM BIT(0)
-#define PTP_PIN_CFG_SYNC BIT(2)
-#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
-#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
-
-enum {
- PTP_PIN_ACTION_IDLE = 0,
- PTP_PIN_ACTION_LOAD,
- PTP_PIN_ACTION_SAVE,
- PTP_PIN_ACTION_CLOCK,
- PTP_PIN_ACTION_DELTA,
- PTP_PIN_ACTION_NOSYNC,
- PTP_PIN_ACTION_SYNC,
-};
-
-#define PTP_CFG_MISC_PTP_EN BIT(2)
-
-#define PSEC_PER_SEC 1000000000000LL
-
-#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
-#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
-
-#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 7d4fd1b6adda..81d81ff75646 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -239,6 +239,8 @@ static const u32 ocelot_ptp_regmap[] = {
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index d326e231f0ad..b7baf7624e18 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -48,7 +48,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported\n");
+ "Only one policer per port is supported");
return -EEXIST;
}
@@ -59,7 +59,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n");
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
return err;
}
@@ -73,7 +73,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer\n");
+ "Could not delete policer");
return err;
}
priv->tc.police_id = 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2616fd735aab..e1e1f4e3639e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1174,18 +1174,6 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
mb();
}
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
-{
- struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
-
- if ((skb->protocol == htons(ETH_P_8021Q)) &&
- (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
- vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
- skb->csum = hw_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
-}
-
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index 5e630f3a0189..a82a37094579 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -27,7 +27,7 @@ config S2IO
on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/s2io.txt>.
+ <file:Documentation/networking/device_drivers/neterion/s2io.rst>.
To compile this driver as a module, choose M here. The module
will be called s2io.
@@ -42,7 +42,7 @@ config VXGE
labeled as either one, depending on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/neterion/vxge.txt>.
+ <file:Documentation/networking/device_drivers/neterion/vxge.rst>.
To compile this driver as a module, choose M here. The module
will be called vxge.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 1c76e1592ca2..ff844e5cc41f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -209,7 +209,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
NFP_FL_OUT_FLAGS_USE_TUN);
output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
} else if (netif_is_lag_master(out_dev) &&
- priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
int gid;
output->flags = cpu_to_be16(tmp_flags);
@@ -956,7 +956,7 @@ nfp_flower_output_action(struct nfp_app *app,
*a_len += sizeof(struct nfp_fl_output);
- if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index a595ddb92bff..a050cb898782 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -264,7 +264,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_cmsg_portmod_rx(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) {
nfp_flower_cmsg_merge_hint_rx(app, skb);
break;
}
@@ -285,7 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_stats_rlim_reply(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
break;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index d8ad9346a26a..c39327677a7d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -665,6 +665,77 @@ err_clear_nn:
return err;
}
+static void nfp_flower_wait_host_bit(struct nfp_app *app)
+{
+ unsigned long err_at;
+ u64 feat;
+ int err;
+
+ /* Wait for HOST_ACK flag bit to propagate */
+ err_at = jiffies + msecs_to_jiffies(100);
+ do {
+ feat = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_combined_features_global",
+ &err);
+ if (time_is_before_eq_jiffies(err_at)) {
+ nfp_warn(app->cpp,
+ "HOST_ACK bit not propagated in FW.\n");
+ break;
+ }
+ usleep_range(1000, 2000);
+ } while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));
+
+ if (err)
+ nfp_warn(app->cpp,
+ "Could not read global features entry from FW\n");
+}
+
+static int nfp_flower_sync_feature_bits(struct nfp_app *app)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ int err;
+
+ /* Tell the firmware of the host supported features. */
+ err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
+ app_priv->flower_ext_feats |
+ NFP_FL_FEATS_HOST_ACK);
+ if (!err)
+ nfp_flower_wait_host_bit(app);
+ else if (err != -ENOENT)
+ return err;
+
+ /* Tell the firmware that the driver supports lag. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_balance_sync_enable", 1);
+ if (!err) {
+ app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
+ nfp_flower_lag_init(&app_priv->nfp_lag);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "LAG not supported by FW.\n");
+ } else {
+ return err;
+ }
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+ /* Tell the firmware that the driver supports flow merging. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_merge_hint_enable", 1);
+ if (!err) {
+ app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
+ nfp_flower_internal_port_init(app_priv);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp,
+ "Flow merge not supported by FW.\n");
+ } else {
+ return err;
+ }
+ } else {
+ nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+ }
+
+ return 0;
+}
+
static int nfp_flower_init(struct nfp_app *app)
{
u64 version, features, ctx_count, num_mems;
@@ -753,35 +824,15 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
app_priv->flower_ext_feats = 0;
else
- app_priv->flower_ext_feats = features;
+ app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;
- /* Tell the firmware that the driver supports lag. */
- err = nfp_rtsym_write_le(app->pf->rtbl,
- "_abi_flower_balance_sync_enable", 1);
- if (!err) {
- app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
- nfp_flower_lag_init(&app_priv->nfp_lag);
- } else if (err == -ENOENT) {
- nfp_warn(app->cpp, "LAG not supported by FW.\n");
- } else {
- goto err_cleanup_metadata;
- }
+ err = nfp_flower_sync_feature_bits(app);
+ if (err)
+ goto err_cleanup;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
- /* Tell the firmware that the driver supports flow merging. */
- err = nfp_rtsym_write_le(app->pf->rtbl,
- "_abi_flower_merge_hint_enable", 1);
- if (!err) {
- app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
- nfp_flower_internal_port_init(app_priv);
- } else if (err == -ENOENT) {
- nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
- } else {
- goto err_lag_clean;
- }
- } else {
- nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
- }
+ err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+ if (err)
+ goto err_cleanup;
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_init(app);
@@ -792,10 +843,9 @@ static int nfp_flower_init(struct nfp_app *app)
return 0;
-err_lag_clean:
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+err_cleanup:
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
-err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
vfree(app->priv);
@@ -810,13 +860,16 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_block_cb);
+
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
nfp_flower_internal_port_cleanup(app_priv);
nfp_flower_metadata_cleanup(app);
@@ -886,7 +939,7 @@ static int nfp_flower_start(struct nfp_app *app)
struct nfp_flower_priv *app_priv = app->priv;
int err;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
@@ -907,16 +960,12 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *app_priv = app->priv;
int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
if (ret & NOTIFY_STOP_MASK)
return ret;
}
- ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
- if (ret & NOTIFY_STOP_MASK)
- return ret;
-
ret = nfp_flower_internal_port_event_handler(app, netdev, event);
if (ret & NOTIFY_STOP_MASK)
return ret;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index d55d0d33bc45..6c3dc3baf387 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -44,8 +44,20 @@ struct nfp_app;
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
-#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
-#define NFP_FL_FEATS_LAG BIT(31)
+#define NFP_FL_FEATS_HOST_ACK BIT(31)
+
+#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
+#define NFP_FL_ENABLE_LAG BIT(1)
+
+#define NFP_FL_FEATS_HOST \
+ (NFP_FL_FEATS_GENEVE | \
+ NFP_FL_NBI_MTU_SETTING | \
+ NFP_FL_FEATS_GENEVE_OPT | \
+ NFP_FL_FEATS_VLAN_PCP | \
+ NFP_FL_FEATS_VF_RLIM | \
+ NFP_FL_FEATS_FLOW_MOD | \
+ NFP_FL_FEATS_PRE_TUN_RULES | \
+ NFP_FL_FEATS_IPV6_TUN)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -145,6 +157,7 @@ struct nfp_fl_internal_ports {
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
* @flower_ext_feats: Bitmap of extra features the HW supports
+ * @flower_en_feats: Bitmap of features enabled by HW
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -180,6 +193,7 @@ struct nfp_flower_priv {
u32 mask_id_seed;
u64 flower_version;
u64 flower_ext_feats;
+ u8 flower_en_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -346,7 +360,7 @@ nfp_flower_internal_port_can_offload(struct nfp_app *app,
{
struct nfp_flower_priv *app_priv = app->priv;
- if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+ if (!(app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE))
return false;
if (!netdev->rtnl_link_ops)
return false;
@@ -444,9 +458,10 @@ void nfp_flower_qos_cleanup(struct nfp_app *app);
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
- struct net_device *netdev,
- unsigned long event);
+int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 546bc01d507d..f7f01e2e3dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -74,9 +74,10 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
-static void
+static int
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
- struct nfp_flower_mac_mpls *msk, struct flow_rule *rule)
+ struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
+ struct netlink_ext_ack *extack)
{
memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
@@ -97,14 +98,28 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
u32 t_mpls;
flow_rule_match_mpls(rule, &match);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
+
+ /* Only support matching the first LSE */
+ if (match.mask->used_lses != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: invalid LSE depth for MPLS match offload");
+ return -EOPNOTSUPP;
+ }
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.key->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.key->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
ext->mpls_lse = cpu_to_be32(t_mpls);
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[0].mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[0].mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[0].mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
msk->mpls_lse = cpu_to_be32(t_mpls);
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -121,6 +136,8 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
+
+ return 0;
}
static void
@@ -461,9 +478,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- (struct nfp_flower_mac_mpls *)msk,
- rule);
+ err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+ (struct nfp_flower_mac_mpls *)msk,
+ rule, extack);
+ if (err)
+ return err;
+
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
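
[Editorial note] The MPLS rework above only offloads the first label stack entry (ls[0]); deeper stacks are rejected via the used_lses check. A worked sketch of the LSE packing, using FIELD_PREP() from <linux/bitfield.h> and the same mask macros (their exact values are not part of this hunk):

	static u32 example_pack_mpls_lse(u32 label, u8 tc, u8 bos)
	{
		/* Each FIELD_PREP() shifts its value into the bitfield
		 * selected by the mask; NFP_FLOWER_MASK_MPLS_Q marks the
		 * LSE word as valid.
		 */
		return FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, label) |
		       FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, tc) |
		       FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, bos) |
		       NFP_FLOWER_MASK_MPLS_Q;
	}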
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 6b60771ccb19..695d24b9dd92 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
return NULL;
}
-static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
struct flow_cls_offload *flower = type_data;
@@ -1708,10 +1708,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
return 0;
}
-static int
+int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
enum tc_setup_type type, void *type_data)
{
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_BLOCK:
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
@@ -1720,29 +1723,3 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
return -EOPNOTSUPP;
}
}
-
-int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
- struct net_device *netdev,
- unsigned long event)
-{
- int err;
-
- if (!nfp_fl_is_netdev_to_offload(netdev))
- return NOTIFY_OK;
-
- if (event == NETDEV_REGISTER) {
- err = __flow_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
- if (err)
- nfp_flower_cmsg_warn(app,
- "Indirect block reg failed - %s\n",
- netdev->name);
- } else if (event == NETDEV_UNREGISTER) {
- __flow_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb,
- app);
- }
-
- return NOTIFY_OK;
-}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4d282fc56009..7ff2ccbd43b0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
-#include <linux/vermagic.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
@@ -31,7 +30,6 @@
#include "nfp_net.h"
static const char nfp_driver_name[] = "nfp";
-const char nfp_driver_version[] = VERMAGIC_STRING;
static const struct pci_device_id nfp_pci_device_ids[] = {
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
@@ -920,4 +918,3 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
-MODULE_VERSION(UTS_RELEASE);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9bfb3b077bc1..0e0cc3d58bdc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1741,10 +1741,15 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
unsigned int pkt_len, bool *completed)
{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
+ /* Reject if xdp_adjust_tail grows packet beyond DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
if (!*completed) {
nfp_net_xdp_complete(tx_ring);
@@ -1817,6 +1822,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
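
[Editorial note] Both additions above guard against bpf_xdp_adjust_tail() growing a frame past the driver's buffer: frame_sz tells the XDP core how much room the helper may use, and the transmit path re-checks the result against the DMA mapping. A condensed sketch of the relationship, using only values from this patch:

	/* Advertise to the XDP core how large the whole buffer is, so
	 * bpf_xdp_adjust_tail() can validate growth requests.
	 */
	xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;

	/* The DMA-mapped area excludes per-buffer non-data overhead;
	 * a tail-grown frame must still fit before XDP_TX.
	 */
	dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	if (pkt_len + dma_off > dma_map_sz)
		return false;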
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2779f1526d1e..6eb9fb9a1814 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -203,8 +203,6 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
-
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s %s %s %s", vnic_version, nsp_version,
@@ -1440,8 +1438,7 @@ static int nfp_net_set_channels(struct net_device *netdev,
unsigned int total_rx, total_tx;
/* Reject unsupported */
- if (!channel->combined_count ||
- channel->other_count != NFP_NET_NON_Q_VECTORS ||
+ if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
(channel->rx_count && channel->tx_count))
return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 79d72c88bbef..b3cabc274121 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,6 +299,20 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
+static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
+
+static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
+}
+
+static void nfp_repr_set_lockdep_class(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
+}
+
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -308,6 +322,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
+ nfp_repr_set_lockdep_class(netdev);
+
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2fdd0753b3af..d2708a57f2ff 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -502,7 +502,8 @@ static int nixge_check_tx_bd_space(struct nixge_priv *priv,
return 0;
}
-static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d20cf03a3ea0..d3cbb4215f5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -823,7 +823,8 @@ static int lpc_mii_init(struct netdata_local *pldat)
if (err)
goto err_out_unregister_bus;
- if (lpc_mii_probe(pldat->ndev) != 0)
+ err = lpc_mii_probe(pldat->ndev);
+ if (err)
goto err_out_unregister_bus;
return 0;
@@ -1029,7 +1030,8 @@ static int lpc_eth_close(struct net_device *ndev)
return 0;
}
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
u32 len, txidx;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index f4ae40ae1e53..d83eff0ae0ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -388,6 +388,19 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
}
/* LIF commands */
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver)
+{
+ union ionic_dev_cmd cmd = {
+ .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
+ .q_identify.lif_type = lif_type,
+ .q_identify.type = qtype,
+ .q_identify.ver = qver,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
union ionic_dev_cmd cmd = {
@@ -431,6 +444,7 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
.q_init.opcode = IONIC_CMD_Q_INIT,
.q_init.lif_index = cpu_to_le16(lif_index),
.q_init.type = q->type,
+ .q_init.ver = qcq->q.lif->qtype_info[q->type].version,
.q_init.index = cpu_to_le32(q->index),
.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 587398b01997..525434f10025 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,7 +12,8 @@
#define IONIC_MIN_MTU ETH_MIN_MTU
#define IONIC_MAX_MTU 9194
-#define IONIC_MAX_TXRX_DESC 16384
+#define IONIC_MAX_TX_DESC 8192
+#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
@@ -83,6 +84,8 @@ static_assert(sizeof(struct ionic_q_init_cmd) == 64);
static_assert(sizeof(struct ionic_q_init_comp) == 16);
static_assert(sizeof(struct ionic_q_control_cmd) == 64);
static_assert(sizeof(ionic_q_control_comp) == 16);
+static_assert(sizeof(struct ionic_q_identify_cmd) == 64);
+static_assert(sizeof(struct ionic_q_identify_comp) == 16);
static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64);
static_assert(sizeof(ionic_rx_mode_set_comp) == 16);
@@ -179,7 +182,7 @@ struct ionic_desc_info {
void *cb_arg;
};
-#define QUEUE_NAME_MAX_SZ 32
+#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
u64 dbell_count;
@@ -204,14 +207,14 @@ struct ionic_queue {
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
- char name[QUEUE_NAME_MAX_SZ];
+ char name[IONIC_QUEUE_NAME_MAX_SZ];
};
-#define INTR_INDEX_NOT_ASSIGNED -1
-#define INTR_NAME_MAX_SZ 32
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
struct ionic_intr_info {
- char name[INTR_NAME_MAX_SZ];
+ char name[IONIC_INTR_NAME_MAX_SZ];
unsigned int index;
unsigned int vector;
u64 rearm_count;
@@ -283,6 +286,8 @@ void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
+ u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
dma_addr_t addr);
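
[Editorial note] ionic_dev_cmd_queue_identify() only posts the command; the caller waits for completion and reads back the version the FW settles on. A hedged usage sketch (the wait step and the lif-type constant are assumed from elsewhere in the driver, not from this hunk):

	u8 drv_max_ver = 1;	/* highest queue version this driver knows */

	ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC, /* assumed */
				     IONIC_QTYPE_TXQ, drv_max_ver);
	/* ...wait for the dev cmd, then read q_identify.ver from the
	 * completion: the FW never returns a version above drv_max_ver.
	 */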
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 6996229facfd..f7e3ce3de04d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -12,10 +12,11 @@
#include "ionic_stats.h"
static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define PRIV_F_SW_DBG_STATS BIT(0)
+#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
"sw-dbg-stats",
};
-#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
+
+#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
@@ -58,7 +59,7 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
count = ionic_get_stats_count(lif);
break;
case ETH_SS_PRIV_FLAGS:
- count = PRIV_FLAGS_COUNT;
+ count = IONIC_PRIV_FLAGS_COUNT;
break;
}
return count;
@@ -75,7 +76,7 @@ static void ionic_get_strings(struct net_device *netdev,
break;
case ETH_SS_PRIV_FLAGS:
memcpy(buf, ionic_priv_flags_strings,
- PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
+ IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
break;
}
}
@@ -159,6 +160,8 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
break;
+ case IONIC_XCVR_PID_QSFP_100G_CWDM4:
+ case IONIC_XCVR_PID_QSFP_100G_PSM4:
case IONIC_XCVR_PID_QSFP_100G_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseLR4_ER4_Full);
@@ -178,6 +181,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
case IONIC_XCVR_PID_SFP_25GBASE_SR:
case IONIC_XCVR_PID_SFP_25GBASE_AOC:
+ case IONIC_XCVR_PID_SFP_25GBASE_ACC:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
@@ -458,9 +462,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->tx_max_pending = IONIC_MAX_TX_DESC;
ring->tx_pending = lif->ntxq_descs;
- ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
+ ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
}
@@ -554,7 +558,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
u32 priv_flags = 0;
if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= PRIV_F_SW_DBG_STATS;
+ priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
return priv_flags;
}
@@ -564,7 +568,7 @@ static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct ionic_lif *lif = netdev_priv(netdev);
clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & PRIV_F_SW_DBG_STATS)
+ if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index ceeb7629e7a0..7e22ba4ed915 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
-/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */
+/* Copyright (c) 2017-2020 Pensando Systems, Inc. All rights reserved. */
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
@@ -9,7 +9,7 @@
#define IONIC_IFNAMSIZ 16
/**
- * Commands
+ * enum ionic_cmd_opcode - Device commands
*/
enum ionic_cmd_opcode {
IONIC_CMD_NOP = 0,
@@ -40,6 +40,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_RX_FILTER_DEL = 32,
/* Queue commands */
+ IONIC_CMD_Q_IDENTIFY = 39,
IONIC_CMD_Q_INIT = 40,
IONIC_CMD_Q_CONTROL = 41,
@@ -57,6 +58,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
IONIC_CMD_QOS_CLASS_RESET = 242,
+ IONIC_CMD_QOS_CLASS_UPDATE = 243,
/* Firmware commands */
IONIC_CMD_FW_DOWNLOAD = 254,
@@ -64,7 +66,7 @@ enum ionic_cmd_opcode {
};
/**
- * Command Return codes
+ * enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
IONIC_RC_SUCCESS = 0, /* Success */
@@ -97,6 +99,7 @@ enum ionic_notifyq_opcode {
IONIC_EVENT_RESET = 2,
IONIC_EVENT_HEARTBEAT = 3,
IONIC_EVENT_LOG = 4,
+ IONIC_EVENT_XCVR = 5,
};
/**
@@ -114,12 +117,11 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @cmd_data: Command-specific bytes.
- * @color: Color bit. (Always 0 for commands issued to the
- * Device Cmd Registers.)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @cmd_data: Command-specific bytes
+ * @color: Color bit (Always 0 for commands issued to the
+ * Device Cmd Registers)
*/
struct ionic_admin_comp {
u8 status;
@@ -146,7 +148,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_nop_comp {
u8 status;
@@ -156,7 +158,7 @@ struct ionic_nop_comp {
/**
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
- * @type: device type
+ * @type: Device type
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -166,7 +168,7 @@ struct ionic_dev_init_cmd {
/**
* struct init_comp - Device init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -184,7 +186,7 @@ struct ionic_dev_reset_cmd {
/**
* struct reset_comp - Reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -205,8 +207,8 @@ struct ionic_dev_identify_cmd {
};
/**
- * struct dev_identify_comp - Driver/device identify command completion
- * @status: The status of the command (enum status_code)
+ * struct ionic_dev_identify_comp - Driver/device identify command completion
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_dev_identify_comp {
@@ -225,8 +227,8 @@ enum ionic_os_type {
};
/**
- * union drv_identity - driver identity information
- * @os_type: OS type (see enum os_type)
+ * union ionic_drv_identity - driver identity information
+ * @os_type: OS type (see enum ionic_os_type)
* @os_dist: OS distribution, numeric format
* @os_dist_str: OS distribution, string format
* @kernel_ver: Kernel version, numeric format
@@ -242,26 +244,26 @@ union ionic_drv_identity {
char kernel_ver_str[32];
char driver_ver_str[32];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * union dev_identity - device identity information
+ * union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
* @nports: Number of ports provisioned
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
- * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- * @intr_coal_div: Interrupt coalescing division factor.
+ * @intr_coal_div: Interrupt coalescing division factor
* Scale user-supplied interrupt coalescing
* value in usecs to device units using:
* device units = usecs * mult / div
- *
+ * @eq_count: Number of shared event queues
*/
union ionic_dev_identity {
struct {
@@ -275,8 +277,9 @@ union ionic_dev_identity {
__le32 ndbpgs_per_lif;
__le32 intr_coal_mult;
__le32 intr_coal_div;
+ __le32 eq_count;
};
- __le32 words[512];
+ __le32 words[478];
};
enum ionic_lif_type {
@@ -286,10 +289,10 @@ enum ionic_lif_type {
};
/**
- * struct ionic_lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - LIF identify command
* @opcode: opcode
- * @type: lif type (enum lif_type)
- * @ver: version of identify returned by device
+ * @type: LIF type (enum ionic_lif_type)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -299,9 +302,9 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct ionic_lif_identify_comp - lif identify command completion
- * @status: status of the command (enum status_code)
- * @ver: version of identify returned by device
+ * struct ionic_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -309,13 +312,24 @@ struct ionic_lif_identify_comp {
u8 rsvd2[14];
};
+/**
+ * enum ionic_lif_capability - LIF capabilities
+ * @IONIC_LIF_CAP_ETH: LIF supports Ethernet
+ * @IONIC_LIF_CAP_RDMA: LIF supports RDMA
+ */
enum ionic_lif_capability {
IONIC_LIF_CAP_ETH = BIT(0),
IONIC_LIF_CAP_RDMA = BIT(1),
};
/**
- * Logical Queue Types
+ * enum ionic_logical_qtype - Logical Queue Types
+ * @IONIC_QTYPE_ADMINQ: Administrative Queue
+ * @IONIC_QTYPE_NOTIFYQ: Notify Queue
+ * @IONIC_QTYPE_RXQ: Receive Queue
+ * @IONIC_QTYPE_TXQ: Transmit Queue
+ * @IONIC_QTYPE_EQ: Event Queue
+ * @IONIC_QTYPE_MAX: Max queue type supported
*/
enum ionic_logical_qtype {
IONIC_QTYPE_ADMINQ = 0,
@@ -327,10 +341,10 @@ enum ionic_logical_qtype {
};
/**
- * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
- * @qtype: Hardware Queue Type.
- * @qid_count: Number of Queue IDs of the logical type.
- * @qid_base: Minimum Queue ID of the logical type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
+ * @qtype: Hardware Queue Type
+ * @qid_count: Number of Queue IDs of the logical type
+ * @qid_base: Minimum Queue ID of the logical type
*/
struct ionic_lif_logical_qtype {
u8 qtype;
@@ -339,6 +353,12 @@ struct ionic_lif_logical_qtype {
__le32 qid_base;
};
+/**
+ * enum ionic_lif_state - LIF state
+ * @IONIC_LIF_DISABLE: LIF disabled
+ * @IONIC_LIF_ENABLE: LIF enabled
+ * @IONIC_LIF_HANG_RESET: LIF hung, being reset
+ */
enum ionic_lif_state {
IONIC_LIF_DISABLE = 0,
IONIC_LIF_ENABLE = 1,
@@ -346,13 +366,13 @@ enum ionic_lif_state {
};
/**
- * LIF configuration
- * @state: lif state (enum lif_state)
- * @name: lif name
- * @mtu: mtu
- * @mac: station mac address
- * @features: features (enum ionic_eth_hw_features)
- * @queue_count: queue counts per queue-type
+ * union ionic_lif_config - LIF configuration
+ * @state: LIF state (enum ionic_lif_state)
+ * @name: LIF name
+ * @mtu: MTU
+ * @mac: Station MAC address
+ * @features: Features (enum ionic_eth_hw_features)
+ * @queue_count: Queue counts per queue-type
*/
union ionic_lif_config {
struct {
@@ -369,37 +389,36 @@ union ionic_lif_config {
};
/**
- * struct ionic_lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - LIF identity information (type-specific)
*
- * @capabilities LIF capabilities
+ * @capabilities: LIF capabilities
*
- * Ethernet:
- * @version: Ethernet identify structure version.
- * @features: Ethernet features supported on this lif type.
- * @max_ucast_filters: Number of perfect unicast addresses supported.
- * @max_mcast_filters: Number of perfect multicast addresses supported.
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximim size of frames to be sent
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth: Ethernet identify structure
+ * @version: Ethernet identify structure version
+ * @max_ucast_filters: Number of perfect unicast addresses supported
+ * @max_mcast_filters: Number of perfect multicast addresses supported
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
*
- * RDMA:
- * @version: RDMA version of opcodes and queue descriptors.
- * @qp_opcodes: Number of rdma queue pair opcodes supported.
- * @admin_opcodes: Number of rdma admin opcodes supported.
- * @npts_per_lif: Page table size per lif
- * @nmrs_per_lif: Number of memory regions per lif
- * @nahs_per_lif: Number of address handles per lif
- * @max_stride: Max work request stride.
- * @cl_stride: Cache line stride.
- * @pte_stride: Page table entry stride.
- * @rrq_stride: Remote RQ work request stride.
- * @rsq_stride: Remote SQ work request stride.
+ * @rdma: RDMA identify structure
+ * @version: RDMA version of opcodes and queue descriptors
+ * @qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @admin_opcodes: Number of RDMA admin opcodes supported
+ * @npts_per_lif: Page table size per LIF
+ * @nmrs_per_lif: Number of memory regions per LIF
+ * @nahs_per_lif: Number of address handles per LIF
+ * @max_stride: Max work request stride
+ * @cl_stride: Cache line stride
+ * @pte_stride: Page table entry stride
+ * @rrq_stride: Remote RQ work request stride
+ * @rsq_stride: Remote SQ work request stride
* @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype.
- * @sq_qtype: RDMA Send Qtype.
- * @rq_qtype: RDMA Receive Qtype.
- * @cq_qtype: RDMA Completion Qtype.
- * @eq_qtype: RDMA Event Qtype.
+ * @aq_qtype: RDMA Admin Qtype
+ * @sq_qtype: RDMA Send Qtype
+ * @rq_qtype: RDMA Receive Qtype
+ * @cq_qtype: RDMA Completion Qtype
+ * @eq_qtype: RDMA Event Qtype
*/
union ionic_lif_identity {
struct {
@@ -439,15 +458,15 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype eq_qtype;
} __packed rdma;
} __packed;
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_lif_init_cmd - LIF init command
- * @opcode: opcode
- * @type: LIF type (enum lif_type)
+ * @opcode: Opcode
+ * @type: LIF type (enum ionic_lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct ionic_lif_info)
+ * @info_pa: Destination address for LIF info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -460,7 +479,8 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
+ * @hw_index: Hardware index of the initialized LIF
*/
struct ionic_lif_init_comp {
u8 status;
@@ -469,14 +489,74 @@ struct ionic_lif_init_comp {
u8 rsvd2[12];
};
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode: opcode
+ * @lif_type: LIF type (enum ionic_lif_type)
+ * @type: Logical queue type (enum ionic_logical_qtype)
+ * @ver: Highest queue type version that the driver supports
+ */
+struct ionic_q_identify_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_type;
+ u8 type;
+ u8 ver;
+ u8 rsvd2[58];
+};
+
+/**
+ * struct ionic_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ */
+struct ionic_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd2[11];
+};
+
+/**
+ * union ionic_q_identity - queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @sg_desc_sz: Scatter/Gather descriptor size
+ * @max_sg_elems: Maximum number of Scatter/Gather elements
+ * @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ */
+union ionic_q_identity {
+ struct {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define IONIC_QIDENT_F_CQ 0x01 /* queue has completion ring */
+#define IONIC_QIDENT_F_SG 0x02 /* queue has scatter/gather ring */
+#define IONIC_QIDENT_F_EQ 0x04 /* queue can use event queue */
+#define IONIC_QIDENT_F_CMB 0x08 /* queue is in cmb bar */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ __le16 sg_desc_sz;
+ __le16 max_sg_elems;
+ __le16 sg_desc_stride;
+ };
+ __le32 words[478];
+};
+
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
- * @ver: Queue version (defines opcode/descriptor scope)
+ * @ver: Queue type version
* @lif_index: LIF index
- * @index: (lif, qtype) relative admin queue index
- * @intr_index: Interrupt control register index
+ * @index: (LIF, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index, or Event queue index
* @pid: Process ID
* @flags:
* IRQ: Interrupt requested on completion
@@ -494,12 +574,11 @@ struct ionic_lif_init_comp {
* descriptors. Values of ring_size <2 and >16 are
* reserved.
* EQ: Enable the Event Queue
- * @cos: Class of service for this queue.
+ * @cos: Class of service for this queue
* @ring_size: Queue ring size, encoded as a log2(size)
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
- * @eq_index: Event queue index
*/
struct ionic_q_init_cmd {
u8 opcode;
@@ -516,29 +595,27 @@ struct ionic_q_init_cmd {
#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
-#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+#define IONIC_QINIT_F_CMB 0x10 /* Enable cmb-based queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
u8 cos;
u8 ring_size;
__le64 ring_base;
__le64 cq_ring_base;
__le64 sg_ring_base;
- __le32 eq_index;
- u8 rsvd2[16];
+ u8 rsvd2[20];
} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
- * @status: The status of the command (enum status_code)
- * @ver: Queue version (defines opcode/descriptor scope)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
* @color: Color
*/
struct ionic_q_init_comp {
u8 status;
- u8 ver;
+ u8 rsvd;
__le16 comp_index;
__le32 hw_index;
u8 hw_type;
@@ -559,10 +636,9 @@ enum ionic_txq_desc_opcode {
/**
* struct ionic_txq_desc - Ethernet Tx queue descriptor format
- * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ * @cmd: Tx operation, see IONIC_TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
- *
* Non-offload send. No segmentation,
* fragmentation or checksum calc/insertion is
* performed by device; packet is prepared
@@ -570,7 +646,6 @@ enum ionic_txq_desc_opcode {
* no further manipulation from device.
*
* IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
- *
* Offload 16-bit L4 checksum
* calculation/insertion. The device will
* calculate the L4 checksum value and
@@ -579,14 +654,16 @@ enum ionic_txq_desc_opcode {
* is calculated starting at @csum_start bytes
* into the packet to the end of the packet.
* The checksum insertion position is given
- * in @csum_offset. This feature is only
- * applicable to protocols such as TCP, UDP
- * and ICMP where a standard (i.e. the
- * 'IP-style' checksum) one's complement
- * 16-bit checksum is used, using an IP
- * pseudo-header to seed the calculation.
- * Software will preload the L4 checksum
- * field with the IP pseudo-header checksum.
+ * in @csum_offset, which is the offset from
+ * @csum_start to the checksum field in the L4
+ * header. This feature is only applicable to
+ * protocols such as TCP, UDP and ICMP where a
+ * standard (i.e. the 'IP-style' checksum)
+ * one's complement 16-bit checksum is used,
+ * using an IP pseudo-header to seed the
+ * calculation. Software will preload the L4
+ * checksum field with the IP pseudo-header
+ * checksum.
*
* For tunnel encapsulation, @csum_start and
* @csum_offset refer to the inner L4
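
[Editorial note] A worked example of the two checksum parameters for a plain (non-tunnelled) TCP packet, using the skb metadata Linux already tracks for CHECKSUM_PARTIAL:

	/* Offset from the start of the packet to the L4 header. */
	csum_start = skb_checksum_start_offset(skb);

	/* Offset from csum_start to the checksum field itself:
	 * 16 bytes into a TCP header, 6 bytes into a UDP header.
	 */
	csum_offset = skb->csum_offset;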
@@ -602,7 +679,6 @@ enum ionic_txq_desc_opcode {
* for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
- *
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
* updated. Similarly, if @csum_l4 is set the the L4
@@ -610,7 +686,6 @@ enum ionic_txq_desc_opcode {
* checksums are also updated.
*
* IONIC_TXQ_DESC_OPCODE_TSO:
- *
 * Device performs TCP segmentation offload
* (TSO). @hdr_len is the number of bytes
* to the end of TCP header (the offset to
@@ -637,40 +712,41 @@ enum ionic_txq_desc_opcode {
* clear CWR in remaining segments.
* @flags:
* vlan:
- * Insert an L2 VLAN header using @vlan_tci.
+ * Insert an L2 VLAN header using @vlan_tci
* encap:
- * Calculate encap header checksum.
+ * Calculate encap header checksum
* csum_l3:
- * Compute L3 header checksum.
+ * Compute L3 header checksum
* csum_l4:
- * Compute L4 header checksum.
+ * Compute L4 header checksum
* tso_sot:
* TSO start
* tso_eot:
* TSO end
* @num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address.
- * (Subsequent data buffers are on txq_sg_desc).
+ * @addr: First data buffer's DMA address
+ * (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
* @hdr_len: Length of packet headers, including
- * encapsulating outer header, if applicable.
- * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
- * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * encapsulating outer header, if applicable
+ * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
+ * IONIC_TXQ_DESC_OPCODE_TSO. Should be set to zero for
* all other modes. For
- * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * IONIC_TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
* of headers up to inner-most L4 header. For
- * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
- * @mss: Desired MSS value for TSO. Only applicable for
- * TXQ_DESC_OPCODE_TSO.
- * @csum_start: Offset into inner-most L3 header of checksum
- * @csum_offset: Offset into inner-most L4 header of checksum
+ * @mss: Desired MSS value for TSO; only applicable for
+ * IONIC_TXQ_DESC_OPCODE_TSO
+ * @csum_start: Offset from packet to first byte checked in L4 checksum
+ * @csum_offset: Offset from csum_start to L4 checksum field
*/
-
+struct ionic_txq_desc {
+ __le64 cmd;
#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
@@ -692,8 +768,6 @@ enum ionic_txq_desc_opcode {
#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
-struct ionic_txq_desc {
- __le64 cmd;
__le16 len;
union {
__le16 vlan_tci;
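
[Editorial note] Since @cmd packs opcode, flags, SG count and the first buffer address into one 64-bit word, the encode direction mirrors decode_txq_desc_cmd() below. A hedged sketch using the same mask/shift family (the FLAGS/NSGE macros are assumed to follow the OPCODE/ADDR pattern shown in this hunk):

	static u64 example_encode_txq_cmd(u8 opcode, u8 flags, u8 nsge,
					  u64 addr)
	{
		return ((u64)(opcode & IONIC_TXQ_DESC_OPCODE_MASK)
				<< IONIC_TXQ_DESC_OPCODE_SHIFT) |
		       ((u64)(flags & IONIC_TXQ_DESC_FLAGS_MASK)
				<< IONIC_TXQ_DESC_FLAGS_SHIFT) |
		       ((u64)(nsge & IONIC_TXQ_DESC_NSGE_MASK)
				<< IONIC_TXQ_DESC_NSGE_SHIFT) |
		       ((addr & IONIC_TXQ_DESC_ADDR_MASK)
				<< IONIC_TXQ_DESC_ADDR_SHIFT);
	}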
@@ -733,28 +807,38 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
*addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
};
-#define IONIC_TX_MAX_SG_ELEMS 8
-#define IONIC_RX_MAX_SG_ELEMS 8
-
/**
- * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_txq_sg_desc {
- struct ionic_txq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_TX_MAX_SG_ELEMS];
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_TX_SG_DESC_STRIDE 8
+ struct ionic_txq_sg_elem elems[IONIC_TX_MAX_SG_ELEMS];
+};
+
+struct ionic_txq_sg_desc_v1 {
+#define IONIC_TX_MAX_SG_ELEMS_V1 15
+#define IONIC_TX_SG_DESC_STRIDE_V1 16
+ struct ionic_txq_sg_elem elems[IONIC_TX_SG_DESC_STRIDE_V1];
};
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @color: Color bit.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @color: Color bit
*/
struct ionic_txq_comp {
u8 status;
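
[Editorial note] For sizing: each ionic_txq_sg_elem is 16 bytes (8 addr + 2 len + 6 reserved), so the v0 SG descriptor is 8 * 16 = 128 bytes, while the v1 layout allocates a full 16-element stride (256 bytes) but advertises only 15 usable elements. Sanity checks in the style this header already uses:

	static_assert(sizeof(struct ionic_txq_sg_elem) == 16);
	static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
	static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256);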
@@ -771,16 +855,15 @@ enum ionic_rxq_desc_opcode {
/**
* struct ionic_rxq_desc - Ethernet Rx queue descriptor format
- * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
- *
- * RXQ_DESC_OPCODE_SIMPLE:
+ * @opcode: Rx operation, see IONIC_RXQ_DESC_OPCODE_*:
*
+ * IONIC_RXQ_DESC_OPCODE_SIMPLE:
* Receive full packet into data buffer
* starting at @addr. Results of
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
- * @len: Data buffer's length, in bytes.
+ * @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
struct ionic_rxq_desc {
@@ -791,26 +874,33 @@ struct ionic_rxq_desc {
};
/**
- * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
+struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+};
+
+/**
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
+ * @elems: Scatter-gather elements
+ */
struct ionic_rxq_sg_desc {
- struct ionic_rxq_sg_elem {
- __le64 addr;
- __le16 len;
- __le16 rsvd[3];
- } elems[IONIC_RX_MAX_SG_ELEMS];
+#define IONIC_RX_MAX_SG_ELEMS 8
+#define IONIC_RX_SG_DESC_STRIDE 8
+ struct ionic_rxq_sg_elem elems[IONIC_RX_SG_DESC_STRIDE];
};
/**
* struct ionic_rxq_comp - Ethernet receive queue completion descriptor
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @rss_hash: 32-bit RSS hash
- * @csum: 16-bit sum of the packet's L2 payload.
+ * @csum: 16-bit sum of the packet's L2 payload
* If the packet's L2 payload is odd length, an extra
* zero-value byte is included in the @csum calculation but
* not included in @len.
@@ -818,33 +908,51 @@ struct ionic_rxq_sg_desc {
* set. Includes .1p and .1q tags.
* @len: Received packet length, in bytes. Excludes FCS.
 * @csum_calc: L2 payload checksum is computed or not
- * @csum_tcp_ok: The TCP checksum calculated by the device
- * matched the checksum in the receive packet's
- * TCP header
- * @csum_tcp_bad: The TCP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * TCP header.
- * @csum_udp_ok: The UDP checksum calculated by the device
- * matched the checksum in the receive packet's
- * UDP header
- * @csum_udp_bad: The UDP checksum calculated by the device did
- * not match the checksum in the receive packet's
- * UDP header.
- * @csum_ip_ok: The IPv4 checksum calculated by the device
- * matched the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for the both IPv4 headers.
- * @csum_ip_bad: The IPv4 checksum calculated by the device did
- * not match the checksum in the receive packet's
- * first IPv4 header. If the receive packet
- * contains both a tunnel IPv4 header and a
- * transport IPv4 header, the device validates the
- * checksum for both IP headers.
- * @VLAN: VLAN header was stripped and placed in @vlan_tci.
- * @pkt_type: Packet type
- * @color: Color bit.
+ * @csum_flags: See IONIC_RXQ_COMP_CSUM_F_*:
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_OK:
+ * The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_TCP_BAD:
+ * The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_OK:
+ * The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ *
+ * IONIC_RXQ_COMP_CSUM_F_UDP_BAD:
+ * The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_OK:
+ * The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for the both IPv4 headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_IP_BAD:
+ * The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_VLAN:
+ * The VLAN header was stripped and placed in @vlan_tci.
+ *
+ * IONIC_RXQ_COMP_CSUM_F_CALC:
+ * The checksum was calculated by the device.
+ *
+ * @pkt_type_color: Packet type and color bit; see IONIC_RXQ_COMP_PKT_TYPE_MASK
*/
struct ionic_rxq_comp {
u8 status;
@@ -891,8 +999,8 @@ enum ionic_eth_hw_features {
IONIC_ETH_HW_TSO_ECN = BIT(10),
IONIC_ETH_HW_TSO_GRE = BIT(11),
IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12),
- IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
- IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
+ IONIC_ETH_HW_TSO_IPXIP4 = BIT(13),
+ IONIC_ETH_HW_TSO_IPXIP6 = BIT(14),
IONIC_ETH_HW_TSO_UDP = BIT(15),
IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16),
};
@@ -923,7 +1031,10 @@ enum q_control_oper {
};
/**
- * Physical connection type
+ * enum ionic_phy_type - Physical connection type
+ * @IONIC_PHY_TYPE_NONE: No PHY installed
+ * @IONIC_PHY_TYPE_COPPER: Copper PHY
+ * @IONIC_PHY_TYPE_FIBER: Fiber PHY
*/
enum ionic_phy_type {
IONIC_PHY_TYPE_NONE = 0,
@@ -932,18 +1043,23 @@ enum ionic_phy_type {
};
/**
- * Transceiver status
+ * enum ionic_xcvr_state - Transceiver status
+ * @IONIC_XCVR_STATE_REMOVED: Transceiver removed
+ * @IONIC_XCVR_STATE_INSERTED: Transceiver inserted
+ * @IONIC_XCVR_STATE_PENDING: Transceiver pending
+ * @IONIC_XCVR_STATE_SPROM_READ: Transceiver data read
+ * @IONIC_XCVR_STATE_SPROM_READ_ERR: Transceiver data read error
*/
enum ionic_xcvr_state {
IONIC_XCVR_STATE_REMOVED = 0,
IONIC_XCVR_STATE_INSERTED = 1,
IONIC_XCVR_STATE_PENDING = 2,
IONIC_XCVR_STATE_SPROM_READ = 3,
- IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
+ IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
/**
- * Supported link modes
+ * enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
IONIC_XCVR_PID_UNKNOWN = 0,
@@ -977,64 +1093,83 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_10GBASE_CU = 68,
IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69,
IONIC_XCVR_PID_QSFP_100G_PSM4 = 70,
+ IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
};
/**
- * Port types
+ * enum ionic_port_type - Port types
+ * @IONIC_PORT_TYPE_NONE: Port type not configured
+ * @IONIC_PORT_TYPE_ETH: Port carries ethernet traffic (inband)
+ * @IONIC_PORT_TYPE_MGMT: Port carries mgmt traffic (out-of-band)
*/
enum ionic_port_type {
- IONIC_PORT_TYPE_NONE = 0, /* port type not configured */
- IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
- IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+ IONIC_PORT_TYPE_NONE = 0,
+ IONIC_PORT_TYPE_ETH = 1,
+ IONIC_PORT_TYPE_MGMT = 2,
};
/**
- * Port config state
+ * enum ionic_port_admin_state - Port config state
+ * @IONIC_PORT_ADMIN_STATE_NONE: Port admin state not configured
+ * @IONIC_PORT_ADMIN_STATE_DOWN: Port admin disabled
+ * @IONIC_PORT_ADMIN_STATE_UP: Port admin enabled
*/
enum ionic_port_admin_state {
- IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
- IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
- IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+ IONIC_PORT_ADMIN_STATE_NONE = 0,
+ IONIC_PORT_ADMIN_STATE_DOWN = 1,
+ IONIC_PORT_ADMIN_STATE_UP = 2,
};
/**
- * Port operational status
+ * enum ionic_port_oper_status - Port operational status
+ * @IONIC_PORT_OPER_STATUS_NONE: Port disabled
+ * @IONIC_PORT_OPER_STATUS_UP: Port link status up
+ * @IONIC_PORT_OPER_STATUS_DOWN: Port link status down
*/
enum ionic_port_oper_status {
- IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
- IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
- IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+ IONIC_PORT_OPER_STATUS_NONE = 0,
+ IONIC_PORT_OPER_STATUS_UP = 1,
+ IONIC_PORT_OPER_STATUS_DOWN = 2,
};
/**
- * Ethernet Forward error correction (fec) modes
+ * enum ionic_port_fec_type - Ethernet Forward error correction (FEC) modes
+ * @IONIC_PORT_FEC_TYPE_NONE: FEC Disabled
+ * @IONIC_PORT_FEC_TYPE_FC: FireCode FEC
+ * @IONIC_PORT_FEC_TYPE_RS: ReedSolomon FEC
*/
enum ionic_port_fec_type {
- IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
- IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
- IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+ IONIC_PORT_FEC_TYPE_NONE = 0,
+ IONIC_PORT_FEC_TYPE_FC = 1,
+ IONIC_PORT_FEC_TYPE_RS = 2,
};
/**
- * Ethernet pause (flow control) modes
+ * enum ionic_port_pause_type - Ethernet pause (flow control) modes
+ * @IONIC_PORT_PAUSE_TYPE_NONE: Disable Pause
+ * @IONIC_PORT_PAUSE_TYPE_LINK: Link level pause
+ * @IONIC_PORT_PAUSE_TYPE_PFC: Priority-Flow Control
*/
enum ionic_port_pause_type {
- IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
- IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
- IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+ IONIC_PORT_PAUSE_TYPE_NONE = 0,
+ IONIC_PORT_PAUSE_TYPE_LINK = 1,
+ IONIC_PORT_PAUSE_TYPE_PFC = 2,
};
/**
- * Loopback modes
+ * enum ionic_port_loopback_mode - Loopback modes
+ * @IONIC_PORT_LOOPBACK_MODE_NONE: Disable loopback
+ * @IONIC_PORT_LOOPBACK_MODE_MAC: MAC loopback
+ * @IONIC_PORT_LOOPBACK_MODE_PHY: PHY/SerDes loopback
*/
enum ionic_port_loopback_mode {
- IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
- IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
- IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0,
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1,
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2,
};
/**
- * Transceiver Status information
+ * struct ionic_xcvr_status - Transceiver Status information
* @state: Transceiver status (enum ionic_xcvr_state)
* @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
@@ -1048,7 +1183,7 @@ struct ionic_xcvr_status {
};
/**
- * Port configuration
+ * union ionic_port_config - Port configuration
* @speed: port speed (in Mbps)
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
@@ -1081,17 +1216,21 @@ union ionic_port_config {
};
/**
- * Port Status information
+ * struct ionic_port_status - Port Status information
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
+ * @link_down_count: number of times link went from up to down
+ * @fec_type: fec type (enum ionic_port_fec_type)
 * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
__le32 speed;
u8 status;
- u8 rsvd[51];
+ __le16 link_down_count;
+ u8 fec_type;
+ u8 rsvd[48];
struct ionic_xcvr_status xcvr;
} __packed;
@@ -1110,7 +1249,7 @@ struct ionic_port_identify_cmd {
/**
* struct ionic_port_identify_comp - Port identify command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
*/
struct ionic_port_identify_comp {
@@ -1135,7 +1274,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1155,7 +1294,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1163,15 +1302,23 @@ struct ionic_port_reset_comp {
};
/**
- * enum stats_ctl_cmd - List of commands for stats control
+ * enum ionic_stats_ctl_cmd - List of commands for stats control
+ * @IONIC_STATS_CTL_RESET: Reset statistics
*/
enum ionic_stats_ctl_cmd {
IONIC_STATS_CTL_RESET = 0,
};
-
/**
* enum ionic_port_attr - List of device attributes
+ * @IONIC_PORT_ATTR_STATE: Port state attribute
+ * @IONIC_PORT_ATTR_SPEED: Port speed attribute
+ * @IONIC_PORT_ATTR_MTU: Port MTU attribute
+ * @IONIC_PORT_ATTR_AUTONEG: Port autonegotiation attribute
+ * @IONIC_PORT_ATTR_FEC: Port FEC attribute
+ * @IONIC_PORT_ATTR_PAUSE: Port pause attribute
+ * @IONIC_PORT_ATTR_LOOPBACK: Port loopback attribute
+ * @IONIC_PORT_ATTR_STATS_CTRL: Port statistics control attribute
*/
enum ionic_port_attr {
IONIC_PORT_ATTR_STATE = 0,
@@ -1186,9 +1333,17 @@ enum ionic_port_attr {
/**
* struct ionic_port_setattr_cmd - Set port attributes on the NIC
- * @opcode: Opcode
- * @index: port index
- * @attr: Attribute type (enum ionic_port_attr)
+ * @opcode: Opcode
+ * @index: Port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @stats_ctl: Port stats setting
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1203,14 +1358,14 @@ struct ionic_port_setattr_cmd {
u8 fec_type;
u8 pause_type;
u8 loopback_mode;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd2[60];
};
};
/**
* struct ionic_port_setattr_comp - Port set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1234,8 +1389,15 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
- * @status: The status of the command (enum status_code)
- * @color: Color bit
+ * @status: Status of the command (enum ionic_status_code)
+ * @state: Port state
+ * @speed: Port speed
+ * @mtu: Port MTU
+ * @an_enable: Port autonegotiation setting
+ * @fec_type: Port FEC type setting
+ * @pause_type: Port pause type setting
+ * @loopback_mode: Port loopback mode
+ * @color: Color bit
*/
struct ionic_port_getattr_comp {
u8 status;
@@ -1254,12 +1416,12 @@ struct ionic_port_getattr_comp {
};
/**
- * struct ionic_lif_status - Lif status register
+ * struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
- * @port_num: port the lif is connected to
+ * @port_num: port the LIF is connected to
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
- * @link_down_count: number of times link status changes
+ * @link_down_count: number of times link went from up to down
*/
struct ionic_lif_status {
__le64 eid;
@@ -1293,6 +1455,9 @@ enum ionic_dev_state {
/**
* enum ionic_dev_attr - List of device attributes
+ * @IONIC_DEV_ATTR_STATE: Device state attribute
+ * @IONIC_DEV_ATTR_NAME: Device name attribute
+ * @IONIC_DEV_ATTR_FEATURES: Device feature attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1322,7 +1487,7 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1349,7 +1514,7 @@ struct ionic_dev_getattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
- * @status: The status of the command (enum status_code)
+ * @status: Status of the command (enum ionic_status_code)
* @features: Device features
* @color: Color bit
*/
@@ -1379,6 +1544,13 @@ enum ionic_rss_hash_types {
/**
* enum ionic_lif_attr - List of LIF attributes
+ * @IONIC_LIF_ATTR_STATE: LIF state attribute
+ * @IONIC_LIF_ATTR_NAME: LIF name attribute
+ * @IONIC_LIF_ATTR_MTU: LIF MTU attribute
+ * @IONIC_LIF_ATTR_MAC: LIF MAC attribute
+ * @IONIC_LIF_ATTR_FEATURES: LIF features attribute
+ * @IONIC_LIF_ATTR_RSS: LIF RSS attribute
+ * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1393,18 +1565,18 @@ enum ionic_lif_attr {
/**
* struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum ionic_lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
- * @state: lif state (enum lif_state)
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types).
- * @key: The hash secret key.
- * @addr: Address for the indirection table shared memory.
- * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ * @types: The hash types to enable (see rss_hash_types)
+ * @key: The hash secret key
+ * @addr: Address for the indirection table shared memory
+ * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1422,16 +1594,15 @@ struct ionic_lif_setattr_cmd {
u8 rsvd[6];
__le64 addr;
} rss;
- u8 stats_ctl;
+ u8 stats_ctl;
u8 rsvd[60];
} __packed;
};
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
@@ -1461,10 +1632,9 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
- * @state: lif state (enum lif_state)
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum ionic_lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: MTU
* @mac: Station MAC address
@@ -1486,11 +1656,12 @@ struct ionic_lif_getattr_comp {
};
enum ionic_rx_mode {
- IONIC_RX_MODE_F_UNICAST = BIT(0),
- IONIC_RX_MODE_F_MULTICAST = BIT(1),
- IONIC_RX_MODE_F_BROADCAST = BIT(2),
- IONIC_RX_MODE_F_PROMISC = BIT(3),
- IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_UNICAST = BIT(0),
+ IONIC_RX_MODE_F_MULTICAST = BIT(1),
+ IONIC_RX_MODE_F_BROADCAST = BIT(2),
+ IONIC_RX_MODE_F_PROMISC = BIT(3),
+ IONIC_RX_MODE_F_ALLMULTI = BIT(4),
+ IONIC_RX_MODE_F_RDMA_SNIFFER = BIT(5),
};
/**
@@ -1498,11 +1669,12 @@ enum ionic_rx_mode {
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
- * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets.
- * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets.
- * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets.
- * IONIC_RX_MODE_F_PROMISC: Accept any packets.
- * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets.
+ * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
+ * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets
+ * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets
+ * IONIC_RX_MODE_F_PROMISC: Accept any packets
+ * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
+ * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
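
The rx_mode word is a bitwise OR of the flags above. A minimal
illustration (the variable name is hypothetical):

	unsigned int rx_mode = IONIC_RX_MODE_F_UNICAST |
			       IONIC_RX_MODE_F_BROADCAST |
			       IONIC_RX_MODE_F_ALLMULTI;
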
@@ -1526,9 +1698,14 @@ enum ionic_rx_filter_match_type {
* @qtype: Queue type
* @lif_index: LIF index
* @qid: Queue ID
- * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx)
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
+ * @vlan: VLAN filter
+ * @vlan.vlan: VLAN ID
+ * @mac: MAC filter
+ * @mac.addr: MAC address (network-byte order)
+ * @mac_vlan: MACVLAN filter
+ * @mac_vlan.vlan: VLAN ID
+ * @mac_vlan.addr: MAC address (network-byte order)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
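
A minimal sketch of filling this command for a unicast MAC match,
assuming LIF 0; the helper name is invented for illustration, the
constants come from this header:

	static void example_build_mac_filter(struct ionic_rx_filter_add_cmd *cmd,
					     const u8 *addr)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->opcode = IONIC_CMD_RX_FILTER_ADD;
		cmd->lif_index = cpu_to_le16(0);	/* assumed LIF 0 */
		cmd->match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC);
		memcpy(cmd->mac.addr, addr, ETH_ALEN);	/* network-byte order */
	}
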
@@ -1553,11 +1730,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
- * @status: The status of the command (enum status_code)
- * @comp_index: The index in the descriptor ring for which this
- * is the completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
- * @color: Color bit.
+ * @color: Color bit
*/
struct ionic_rx_filter_add_comp {
u8 status;
@@ -1584,63 +1760,6 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
-/**
- * struct ionic_qos_identify_cmd - QoS identify command
- * @opcode: opcode
- * @ver: Highest version of identify supported by driver
- *
- */
-struct ionic_qos_identify_cmd {
- u8 opcode;
- u8 ver;
- u8 rsvd[62];
-};
-
-/**
- * struct ionic_qos_identify_comp - QoS identify command completion
- * @status: The status of the command (enum status_code)
- * @ver: Version of identify returned by device
- */
-struct ionic_qos_identify_comp {
- u8 status;
- u8 ver;
- u8 rsvd[14];
-};
-
-#define IONIC_QOS_CLASS_MAX 7
-#define IONIC_QOS_CLASS_NAME_SZ 32
-#define IONIC_QOS_DSCP_MAX_VALUES 64
-
-/**
- * enum ionic_qos_class
- */
-enum ionic_qos_class {
- IONIC_QOS_CLASS_DEFAULT = 0,
- IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
- IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
- IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
- IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
- IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
- IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
-};
-
-/**
- * enum ionic_qos_class_type - Traffic classification criteria
- */
-enum ionic_qos_class_type {
- IONIC_QOS_CLASS_TYPE_NONE = 0,
- IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */
- IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */
-};
-
-/**
- * enum ionic_qos_sched_type - Qos class scheduling type
- */
-enum ionic_qos_sched_type {
- IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
- IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */
-};
-
enum ionic_vf_attr {
IONIC_VF_ATTR_SPOOFCHK = 1,
IONIC_VF_ATTR_TRUST = 2,
@@ -1652,26 +1771,29 @@ enum ionic_vf_attr {
};
/**
- * VF link status
+ * enum ionic_vf_link_status - Virtual Function link status
+ * @IONIC_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @IONIC_VF_LINK_STATUS_UP: Link always up
+ * @IONIC_VF_LINK_STATUS_DOWN: Link always down
*/
enum ionic_vf_link_status {
- IONIC_VF_LINK_STATUS_AUTO = 0, /* link state of the uplink */
- IONIC_VF_LINK_STATUS_UP = 1, /* link is always up */
- IONIC_VF_LINK_STATUS_DOWN = 2, /* link is always down */
+ IONIC_VF_LINK_STATUS_AUTO = 0,
+ IONIC_VF_LINK_STATUS_UP = 1,
+ IONIC_VF_LINK_STATUS_DOWN = 2,
};
/**
* struct ionic_vf_setattr_cmd - Set VF attributes on the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
- * macaddr mac address
- * vlanid vlan ID
- * maxrate max Tx rate in Mbps
- * spoofchk enable address spoof checking
- * trust enable VF trust
- * linkstate set link up or down
- * stats_pa set DMA address for VF stats
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats_pa: set DMA address for VF stats
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -1701,8 +1823,8 @@ struct ionic_vf_setattr_comp {
/**
* struct ionic_vf_getattr_cmd - Get VF attributes from the NIC
* @opcode: Opcode
- * @index: VF index
* @attr: Attribute type (enum ionic_vf_attr)
+ * @vf_index: VF index
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -1729,19 +1851,85 @@ struct ionic_vf_getattr_comp {
};
/**
- * union ionic_qos_config - Qos configuration structure
+ * struct ionic_qos_identify_cmd - QoS identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_qos_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct ionic_qos_identify_comp - QoS identify command completion
+ * @status: Status of the command (enum ionic_status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_qos_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+#define IONIC_QOS_TC_MAX 8
+/* Capri max supported, should be renamed. */
+#define IONIC_QOS_CLASS_MAX 7
+#define IONIC_QOS_PCP_MAX 8
+#define IONIC_QOS_CLASS_NAME_SZ 32
+#define IONIC_QOS_DSCP_MAX 64
+#define IONIC_QOS_ALL_PCP 0xFF
+
+/**
+ * enum ionic_qos_class
+ */
+enum ionic_qos_class {
+ IONIC_QOS_CLASS_DEFAULT = 0,
+ IONIC_QOS_CLASS_USER_DEFINED_1 = 1,
+ IONIC_QOS_CLASS_USER_DEFINED_2 = 2,
+ IONIC_QOS_CLASS_USER_DEFINED_3 = 3,
+ IONIC_QOS_CLASS_USER_DEFINED_4 = 4,
+ IONIC_QOS_CLASS_USER_DEFINED_5 = 5,
+ IONIC_QOS_CLASS_USER_DEFINED_6 = 6,
+};
+
+/**
+ * enum ionic_qos_class_type - Traffic classification criteria
+ * @IONIC_QOS_CLASS_TYPE_NONE: No QoS
+ * @IONIC_QOS_CLASS_TYPE_PCP: Dot1Q PCP
+ * @IONIC_QOS_CLASS_TYPE_DSCP: IP DSCP
+ */
+enum ionic_qos_class_type {
+ IONIC_QOS_CLASS_TYPE_NONE = 0,
+ IONIC_QOS_CLASS_TYPE_PCP = 1,
+ IONIC_QOS_CLASS_TYPE_DSCP = 2,
+};
+
+/**
+ * enum ionic_qos_sched_type - QoS class scheduling type
+ * @IONIC_QOS_SCHED_TYPE_STRICT: Strict priority
+ * @IONIC_QOS_SCHED_TYPE_DWRR: Deficit weighted round-robin
+ */
+enum ionic_qos_sched_type {
+ IONIC_QOS_SCHED_TYPE_STRICT = 0,
+ IONIC_QOS_SCHED_TYPE_DWRR = 1,
+};
+
+/**
+ * union ionic_qos_config - QoS configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
- * IONIC_QOS_CONFIG_F_DROP drop/nodrop
+ * IONIC_QOS_CONFIG_F_NO_DROP lossless (no-drop) class
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
- * @class_type: Qos class type (enum ionic_qos_class_type)
- * @pause_type: Qos pause type (enum ionic_qos_pause_type)
- * @name: Qos class name
+ * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: QoS class type (enum ionic_qos_class_type)
+ * @pause_type: QoS pause type (enum ionic_qos_pause_type)
+ * @name: QoS class name
* @mtu: MTU of the class
- * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
- * @dwrr_weight: Qos class scheduling weight
+ * @pfc_cos: Priority-Flow Control class of service
+ * @dwrr_weight: QoS class scheduling weight
* @strict_rlmt: Rate limit for strict priority scheduling
* @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP)
* @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP)
@@ -1752,7 +1940,8 @@ struct ionic_vf_getattr_comp {
union ionic_qos_config {
struct {
#define IONIC_QOS_CONFIG_F_ENABLE BIT(0)
-#define IONIC_QOS_CONFIG_F_DROP BIT(1)
+#define IONIC_QOS_CONFIG_F_NO_DROP BIT(1)
+/* Used to rewrite PCP or DSCP value. */
#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2)
#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3)
u8 flags;
@@ -1769,6 +1958,7 @@ union ionic_qos_config {
__le64 strict_rlmt;
};
/* marking */
+ /* Used to rewrite PCP or DSCP value. */
union {
u8 rw_dot1q_pcp;
u8 rw_ip_dscp;
@@ -1778,7 +1968,7 @@ union ionic_qos_config {
u8 dot1q_pcp;
struct {
u8 ndscp;
- u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES];
+ u8 ip_dscp[IONIC_QOS_DSCP_MAX];
};
};
};
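
A hedged sketch of filling this union for a lossless PCP-classified
class under DWRR scheduling; the weight and PCP values are arbitrary
examples, not recommended settings:

	static void example_qos_pcp_class(union ionic_qos_config *qos)
	{
		memset(qos, 0, sizeof(*qos));
		qos->flags = IONIC_QOS_CONFIG_F_ENABLE | IONIC_QOS_CONFIG_F_NO_DROP;
		qos->sched_type = IONIC_QOS_SCHED_TYPE_DWRR;
		qos->class_type = IONIC_QOS_CLASS_TYPE_PCP;
		qos->mtu = cpu_to_le32(1500);
		qos->dwrr_weight = 50;	/* example scheduling weight */
		qos->dot1q_pcp = 3;	/* classify Dot1Q PCP 3 into this class */
	}
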
@@ -1797,15 +1987,15 @@ union ionic_qos_identity {
u8 version;
u8 type;
u8 rsvd[62];
- union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
};
- __le32 words[512];
+ __le32 words[478];
};
/**
- * struct qos_init_cmd - QoS config init command
+ * struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
- * @group: Qos class id
+ * @group: QoS class id
* @info_pa: destination address for qos info
*/
struct ionic_qos_init_cmd {
@@ -1819,8 +2009,9 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct ionic_qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
+ * @group: QoS class id
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -1847,10 +2038,16 @@ struct ionic_fw_download_cmd {
typedef struct ionic_admin_comp ionic_fw_download_comp;
+/**
+ * enum ionic_fw_control_oper - FW control operations
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ */
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0, /* Reset firmware */
- IONIC_FW_INSTALL = 1, /* Install firmware */
- IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
};
/**
@@ -1869,8 +2066,10 @@ struct ionic_fw_control_cmd {
/**
* struct ionic_fw_control_comp - Firmware control completion
- * @opcode: opcode
- * @slot: slot where the firmware was installed
+ * @status: Status of the command (enum ionic_status_code)
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @slot: Slot where the firmware was installed
+ * @color: Color bit
*/
struct ionic_fw_control_comp {
u8 status;
@@ -1888,11 +2087,11 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
- * @lif_index: lif index
+ * @lif_index: LIF index
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA-specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
- * Nonzero status means the LIF does not support rdma.
+ * Nonzero status means the LIF does not support RDMA.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1904,30 +2103,29 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
- * @lif_index lif index
- * @qid_ver: (qid | (rdma version << 24))
+ * @lif_index: LIF index
+ * @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
* @dbid: doorbell page id
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
- * @xxx_table_index: temporary, but should not need pgtbl for contig. queues.
*
- * The same command struct is used to create an rdma event queue, completion
- * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * The same command struct is used to create an RDMA event queue, completion
+ * queue, or RDMA admin queue. The cid is an interrupt number for an event
* queue, an event queue id for a completion queue, or a completion queue id
- * for an rdma admin queue.
+ * for an RDMA admin queue.
*
* The queue created via a dev command must be contiguous in dma space.
*
* The dev commands are intended only to be used during driver initialization,
- * to create queues supporting the rdma admin queue. Other queues, and other
- * types of rdma resources like memory regions, will be created and registered
- * via the rdma admin queue, and will support a more complete interface
+ * to create queues supporting the RDMA admin queue. Other queues, and other
+ * types of RDMA resources like memory regions, will be created and registered
+ * via the RDMA admin queue, and will support a more complete interface
* providing scatter gather lists for larger, scattered queue buffers and
* memory registration.
*
- * There is no rdma specific dev command completion struct. Completion uses
+ * There is no RDMA-specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
@@ -1940,8 +2138,7 @@ struct ionic_rdma_queue_cmd {
u8 depth_log2;
u8 stride_log2;
__le64 dma_addr;
- u8 rsvd2[36];
- __le32 xxx_table_index;
+ u8 rsvd2[40];
};
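
As a rough illustration of the flow described above, a hypothetical
caller creating an RDMA event queue might fill the command like this
(the opcode constant is assumed from this header's command enum, and
the queue sizing values are arbitrary examples):

	static void example_rdma_create_eq(struct ionic_rdma_queue_cmd *cmd,
					   u32 qid, u8 rdma_ver, u32 intr,
					   u16 dbid, dma_addr_t q_base)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->opcode = IONIC_CMD_RDMA_CREATE_EQ;
		cmd->lif_index = cpu_to_le16(0);	/* assumed LIF 0 */
		cmd->qid_ver = cpu_to_le32(qid | ((u32)rdma_ver << 24));
		cmd->cid = cpu_to_le32(intr);	/* interrupt number, for an EQ */
		cmd->dbid = cpu_to_le16(dbid);
		cmd->depth_log2 = 6;		/* 64-entry queue */
		cmd->stride_log2 = 5;		/* 32-byte entries */
		cmd->dma_addr = cpu_to_le64(q_base);	/* contiguous in DMA space */
	}
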
/******************************************************************
@@ -1949,7 +2146,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct ionic_notifyq_event
+ * struct ionic_notifyq_event - Generic event reporting structure
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1964,9 +2161,9 @@ struct ionic_notifyq_event {
};
/**
- * struct ionic_link_change_event
+ * struct ionic_link_change_event - Link change event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LINK_CHANGE
+ * @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
* @link_speed: speed of the network link
*
@@ -1981,9 +2178,9 @@ struct ionic_link_change_event {
};
/**
- * struct ionic_reset_event
+ * struct ionic_reset_event - Reset event notification
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_RESET
+ * @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
*
@@ -1999,11 +2196,9 @@ struct ionic_reset_event {
};
/**
- * struct ionic_heartbeat_event
+ * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_HEARTBEAT
- *
- * Sent periodically by the NIC to indicate continued health
+ * @ecode: event code = IONIC_EVENT_HEARTBEAT
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2012,12 +2207,10 @@ struct ionic_heartbeat_event {
};
/**
- * struct ionic_log_event
+ * struct ionic_log_event - Sent to notify the driver of an internal error
* @eid: event number
- * @ecode: event code = EVENT_OPCODE_LOG
+ * @ecode: event code = IONIC_EVENT_LOG
* @data: log data
- *
- * Sent to notify the driver of an internal error.
*/
struct ionic_log_event {
__le64 eid;
@@ -2026,7 +2219,18 @@ struct ionic_log_event {
};
/**
- * struct ionic_port_stats
+ * struct ionic_xcvr_event - Transceiver change event
+ * @eid: event number
+ * @ecode: event code = IONIC_EVENT_XCVR
+ */
+struct ionic_xcvr_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 rsvd[54];
+};
+
+/**
+ * struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2131,28 +2335,61 @@ struct ionic_mgmt_port_stats {
__le64 frames_rx_multicast;
__le64 frames_rx_broadcast;
__le64 frames_rx_pause;
- __le64 frames_rx_bad_length0;
- __le64 frames_rx_undersized1;
- __le64 frames_rx_oversized2;
- __le64 frames_rx_fragments3;
- __le64 frames_rx_jabber4;
- __le64 frames_rx_64b5;
- __le64 frames_rx_65b_127b6;
- __le64 frames_rx_128b_255b7;
- __le64 frames_rx_256b_511b8;
- __le64 frames_rx_512b_1023b9;
- __le64 frames_rx_1024b_1518b0;
- __le64 frames_rx_gt_1518b1;
- __le64 frames_rx_fifo_full2;
- __le64 frames_tx_ok3;
- __le64 frames_tx_all4;
- __le64 frames_tx_bad5;
- __le64 octets_tx_ok6;
- __le64 octets_tx_total7;
- __le64 frames_tx_unicast8;
- __le64 frames_tx_multicast9;
- __le64 frames_tx_broadcast0;
- __le64 frames_tx_pause1;
+ __le64 frames_rx_bad_length;
+ __le64 frames_rx_undersized;
+ __le64 frames_rx_oversized;
+ __le64 frames_rx_fragments;
+ __le64 frames_rx_jabber;
+ __le64 frames_rx_64b;
+ __le64 frames_rx_65b_127b;
+ __le64 frames_rx_128b_255b;
+ __le64 frames_rx_256b_511b;
+ __le64 frames_rx_512b_1023b;
+ __le64 frames_rx_1024b_1518b;
+ __le64 frames_rx_gt_1518b;
+ __le64 frames_rx_fifo_full;
+ __le64 frames_tx_ok;
+ __le64 frames_tx_all;
+ __le64 frames_tx_bad;
+ __le64 octets_tx_ok;
+ __le64 octets_tx_total;
+ __le64 frames_tx_unicast;
+ __le64 frames_tx_multicast;
+ __le64 frames_tx_broadcast;
+ __le64 frames_tx_pause;
+};
+
+enum ionic_pb_buffer_drop_stats {
+ IONIC_BUFFER_INTRINSIC_DROP = 0,
+ IONIC_BUFFER_DISCARDED,
+ IONIC_BUFFER_ADMITTED,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP,
+ IONIC_BUFFER_OUT_OF_CELLS_DROP_2,
+ IONIC_BUFFER_OUT_OF_CREDIT_DROP,
+ IONIC_BUFFER_TRUNCATION_DROP,
+ IONIC_BUFFER_PORT_DISABLED_DROP,
+ IONIC_BUFFER_COPY_TO_CPU_TAIL_DROP,
+ IONIC_BUFFER_SPAN_TAIL_DROP,
+ IONIC_BUFFER_MIN_SIZE_VIOLATION_DROP,
+ IONIC_BUFFER_ENQUEUE_ERROR_DROP,
+ IONIC_BUFFER_INVALID_PORT_DROP,
+ IONIC_BUFFER_INVALID_OUTPUT_QUEUE_DROP,
+ IONIC_BUFFER_DROP_MAX,
+};
+
+/**
+ * struct ionic_port_pb_stats - packet buffer system stats
+ * drop_counts[] is indexed by enum ionic_pb_buffer_drop_stats
+ */
+struct ionic_port_pb_stats {
+ __le64 sop_count_in;
+ __le64 eop_count_in;
+ __le64 sop_count_out;
+ __le64 eop_count_out;
+ __le64 drop_counts[IONIC_BUFFER_DROP_MAX];
+ __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
+ __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX];
+ __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX];
};
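
For example, one drop counter can be read by indexing drop_counts[]
with the enum above (the pb_stats pointer is assumed to come from the
port info structure below):

	u64 discarded;

	discarded = le64_to_cpu(pb_stats->drop_counts[IONIC_BUFFER_DISCARDED]);
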
/**
@@ -2184,22 +2421,31 @@ union ionic_port_identity {
u8 rsvd2[44];
union ionic_port_config config;
};
- __le32 words[512];
+ __le32 words[478];
};
/**
* struct ionic_port_info - port info structure
- * @port_status: port status
- * @port_stats: port stats
+ * @config: Port configuration data
+ * @status: Port status data
+ * @stats: Port statistics data
+ * @mgmt_stats: Port management statistics data
+ * @pb_stats: Uplink packet buffer drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
struct ionic_port_status status;
- struct ionic_port_stats stats;
+ union {
+ struct ionic_port_stats stats;
+ struct ionic_mgmt_port_stats mgmt_stats;
+ };
+ /* room for pb_stats to start at 2k offset */
+ u8 rsvd[760];
+ struct ionic_port_pb_stats pb_stats;
};
/**
- * struct ionic_lif_stats
+ * struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
/* RX */
@@ -2252,7 +2498,7 @@ struct ionic_lif_stats {
__le64 tx_queue_error;
__le64 tx_desc_fetch_error;
__le64 tx_desc_data_error;
- __le64 rsvd9;
+ __le64 tx_queue_empty;
__le64 rsvd10;
__le64 rsvd11;
__le64 rsvd12;
@@ -2353,7 +2599,10 @@ struct ionic_lif_stats {
};
/**
- * struct ionic_lif_info - lif info structure
+ * struct ionic_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ * @stats: LIF statistics structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2389,7 +2638,9 @@ union ionic_dev_cmd {
struct ionic_qos_init_cmd qos_init;
struct ionic_qos_reset_cmd qos_reset;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
+ struct ionic_q_control_cmd q_control;
};
union ionic_dev_cmd_comp {
@@ -2421,19 +2672,20 @@ union ionic_dev_cmd_comp {
ionic_qos_init_comp qos_init;
ionic_qos_reset_comp qos_reset;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
};
/**
- * union dev_info - Device info register format (read-only)
- * @signature: Signature value of 0x44455649 ('DEVI').
- * @version: Current version of info.
- * @asic_type: Asic type.
- * @asic_rev: Asic revision.
- * @fw_status: Firmware status.
- * @fw_heartbeat: Firmware heartbeat counter.
- * @serial_num: Serial number.
- * @fw_version: Firmware version.
+ * union ionic_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -2454,10 +2706,10 @@ union ionic_dev_info_regs {
/**
* union ionic_dev_cmd_regs - Device command register format (read-write)
- * @doorbell: Device Cmd Doorbell, write-only.
+ * @doorbell: Device Cmd Doorbell, write-only
* Write a 1 to signal device to process cmd,
* poll done for completion.
- * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
* @data: Opcode-specific side-data
@@ -2475,7 +2727,7 @@ union ionic_dev_cmd_regs {
};
/**
- * union ionic_dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
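
A minimal sketch of the doorbell/done handshake described above; the
driver's real wait loop also checks the firmware heartbeat and uses a
configurable timeout:

	static int example_dev_cmd_go(union ionic_dev_cmd_regs __iomem *regs,
				      union ionic_dev_cmd *cmd)
	{
		int i;

		memcpy_toio(&regs->cmd, cmd, sizeof(*cmd));
		iowrite32(1, &regs->doorbell);	/* signal device to process cmd */

		for (i = 0; i < 1000; i++) {
			if (ioread32(&regs->done) & 1)	/* bit 0 == 1: done */
				return 0;
			usleep_range(100, 200);
		}

		return -ETIMEDOUT;
	}
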
@@ -2490,6 +2742,7 @@ union ionic_dev_regs {
union ionic_adminq_cmd {
struct ionic_admin_cmd cmd;
struct ionic_nop_cmd nop;
+ struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
struct ionic_lif_setattr_cmd lif_setattr;
@@ -2506,6 +2759,7 @@ union ionic_adminq_cmd {
union ionic_adminq_comp {
struct ionic_admin_comp comp;
struct ionic_nop_comp nop;
+ struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
struct ionic_lif_setattr_comp lif_setattr;
struct ionic_lif_getattr_comp lif_getattr;
@@ -2531,14 +2785,14 @@ union ionic_adminq_comp {
/**
* struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
- * @ring: Selects the specific ring of the queue to update.
+ * @ring: Selects the specific ring of the queue to update
* Type-specific meaning:
- * ring=0: Default producer/consumer queue.
+ * ring=0: Default producer/consumer queue
* ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
* send events to EQs when armed. EQs send
* interrupts when armed.
- * @qid: The queue id selects the queue destination for the
- * producer index and flags.
+ * @qid_lo: Queue destination for the producer index and flags (low bits)
+ * @qid_hi: Queue destination for the producer index and flags (high bits)
*/
struct ionic_doorbell {
__le16 p_index;
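
Assuming the IONIC_DBELL_QID() and IONIC_DBELL_RING_0 helpers defined
elsewhere in this header, ringing the default ring of a queue reduces
to composing these fields into a single 64-bit write (a sketch, not
the driver's actual helper):

	static inline void example_ring_doorbell(u64 __iomem *db_page, int qtype,
						 u32 qid, u16 p_index)
	{
		u64 val = IONIC_DBELL_QID(qid) | IONIC_DBELL_RING_0 | p_index;

		writeq(val, &db_page[qtype]);
	}
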
@@ -2571,6 +2825,7 @@ struct ionic_identity {
union ionic_lif_identity lif;
union ionic_port_identity port;
union ionic_qos_identity qos;
+ union ionic_q_identity txq;
};
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f8a9c1bcffc9..7321a92f8395 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -17,6 +17,16 @@
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
+/* queuetype support level */
+static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
+ [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
+ [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
+ [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
+ [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ */
+};
+
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
@@ -27,6 +37,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
+static void ionic_lif_queue_identify(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -186,10 +197,10 @@ static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
return 0;
}
-static void ionic_intr_free(struct ionic_lif *lif, int index)
+static void ionic_intr_free(struct ionic *ionic, int index)
{
- if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
- clear_bit(index, lif->ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
+ clear_bit(index, ionic->intrs);
}
static int ionic_qcq_enable(struct ionic_qcq *qcq)
@@ -299,7 +310,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif, qcq->intr.index);
+ ionic_intr_free(lif->ionic, qcq->intr.index);
}
devm_kfree(dev, qcq->cq.info);
@@ -345,7 +356,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
- ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
+ ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
}
@@ -444,7 +455,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
- new->intr.index = INTR_INDEX_NOT_ASSIGNED;
+ new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
@@ -497,7 +508,7 @@ err_out_free_irq:
devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
if (flags & IONIC_QCQ_F_INTR)
- ionic_intr_free(lif, new->intr.index);
+ ionic_intr_free(lif->ionic, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -597,6 +608,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -614,6 +626,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -646,6 +660,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
@@ -663,6 +678,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
+ dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
q->tail = q->info;
q->head = q->tail;
@@ -726,7 +743,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
}
break;
default:
- netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
@@ -775,8 +792,8 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
return max(n_work, a_work);
}
-static void ionic_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *ns)
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_lif_stats *ls;
@@ -1509,17 +1526,25 @@ static void ionic_txrx_free(struct ionic_lif *lif)
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
+ unsigned int sg_desc_sz;
unsigned int flags;
unsigned int i;
int err = 0;
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
- sizeof(struct ionic_txq_sg_desc),
+ sg_desc_sz,
lif->kern_pid, &lif->txqcqs[i].qcq);
if (err)
goto err_out;
@@ -1682,7 +1707,7 @@ int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ if (!netif_device_present(netdev))
return 0;
ionic_stop_queues(lif);
@@ -1699,6 +1724,9 @@ static int ionic_get_vf_config(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1726,6 +1754,9 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
struct ionic_lif_stats *vs;
int ret = 0;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_read(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1761,6 +1792,9 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1792,6 +1826,9 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1818,6 +1855,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (tx_min)
return -EINVAL;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1840,6 +1880,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1862,6 +1905,9 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
u8 data = set; /* convert to u8 for config */
int ret;
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -1898,6 +1944,9 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
return -EINVAL;
}
+ if (!netif_device_present(netdev))
+ return -EBUSY;
+
down_write(&ionic->vf_op_lock);
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
@@ -2065,9 +2114,17 @@ int ionic_lifs_alloc(struct ionic *ionic)
/* only build the first lif, others are for later features */
set_bit(0, ionic->lifbits);
+
lif = ionic_lif_alloc(ionic, 0);
+ if (IS_ERR_OR_NULL(lif)) {
+ clear_bit(0, ionic->lifbits);
+ return -ENOMEM;
+ }
- return PTR_ERR_OR_ZERO(lif);
+ lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
+ ionic_lif_queue_identify(lif);
+
+ return 0;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2292,6 +2349,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
.opcode = IONIC_CMD_Q_INIT,
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
+ .ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_ENA),
@@ -2578,6 +2636,80 @@ void ionic_lifs_unregister(struct ionic *ionic)
unregister_netdev(ionic->master_lif->netdev);
}
+static void ionic_lif_queue_identify(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ union ionic_q_identity *q_ident;
+ struct ionic_dev *idev;
+ int qtype;
+ int err;
+
+ idev = &lif->ionic->idev;
+ q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+
+ for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
+ struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
+
+ /* filter out the ones we know about */
+ switch (qtype) {
+ case IONIC_QTYPE_ADMINQ:
+ case IONIC_QTYPE_NOTIFYQ:
+ case IONIC_QTYPE_RXQ:
+ case IONIC_QTYPE_TXQ:
+ break;
+ default:
+ continue;
+ }
+
+ memset(qti, 0, sizeof(*qti));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
+ ionic_qtype_versions[qtype]);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ qti->version = q_ident->version;
+ qti->supported = q_ident->supported;
+ qti->features = le64_to_cpu(q_ident->features);
+ qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
+ qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
+ qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
+ qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
+ qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err == -EINVAL) {
+ dev_err(ionic->dev, "qtype %d not supported\n", qtype);
+ continue;
+ } else if (err == -EIO) {
+ dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
+ return;
+ } else if (err) {
+ dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
+ qtype, err);
+ return;
+ }
+
+ dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
+ qtype, qti->version);
+ dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
+ qtype, qti->supported);
+ dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
+ qtype, qti->features);
+ dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
+ qtype, qti->desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
+ qtype, qti->comp_sz);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
+ qtype, qti->sg_desc_sz);
+ dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
+ qtype, qti->max_sg_elems);
+ dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
+ qtype, qti->sg_desc_stride);
+ }
+}
+
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lid)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 5d4ffda5c05f..c3428034a17b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -20,11 +20,13 @@ struct ionic_tx_stats {
u64 bytes;
u64 clean;
u64 linearize;
- u64 no_csum;
+ u64 csum_none;
u64 csum;
u64 crc32_csum;
u64 tso;
+ u64 tso_bytes;
u64 frags;
+ u64 vlan_inserted;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
};
@@ -38,6 +40,7 @@ struct ionic_rx_stats {
u64 csum_error;
u64 buffers_posted;
u64 dropped;
+ u64 vlan_stripped;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -114,11 +117,17 @@ struct ionic_lif_sw_stats {
u64 rx_packets;
u64 rx_bytes;
u64 tx_tso;
- u64 tx_no_csum;
+ u64 tx_tso_bytes;
+ u64 tx_csum_none;
u64 tx_csum;
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_error;
+ u64 hw_tx_dropped;
+ u64 hw_rx_dropped;
+ u64 hw_rx_over_errors;
+ u64 hw_rx_missed_errors;
+ u64 hw_tx_aborted_errors;
};
enum ionic_lif_state_flags {
@@ -133,6 +142,17 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_STATE_SIZE
};
+struct ionic_qtype_info {
+ u8 version;
+ u8 supported;
+ u64 features;
+ u16 desc_sz;
+ u16 comp_sz;
+ u16 sg_desc_sz;
+ u16 max_sg_elems;
+ u16 sg_desc_stride;
+};
+
#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
@@ -161,11 +181,13 @@ struct ionic_lif {
bool mc_overflow;
unsigned int nmcast;
bool uc_overflow;
+ u16 lif_type;
unsigned int nucast;
struct ionic_lif_info *info;
dma_addr_t info_pa;
u32 info_sz;
+ struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
@@ -227,6 +249,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
}
void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 3344bc1f7671..df5b9bcc3aba 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
-#include <linux/vermagic.h>
+#include <generated/utsrelease.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -152,6 +152,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_RX_FILTER_ADD";
case IONIC_CMD_RX_FILTER_DEL:
return "IONIC_CMD_RX_FILTER_DEL";
+ case IONIC_CMD_Q_IDENTIFY:
+ return "IONIC_CMD_Q_IDENTIFY";
case IONIC_CMD_Q_INIT:
return "IONIC_CMD_Q_INIT";
case IONIC_CMD_Q_CONTROL:
@@ -356,7 +358,7 @@ try_again:
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(20);
+ msleep(5);
hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
@@ -413,6 +415,7 @@ int ionic_setup(struct ionic *ionic)
err = ionic_dev_setup(ionic);
if (err)
return err;
+ ionic_reset(ionic);
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 8f2a8fb029f1..2a1885da58a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -15,11 +15,109 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(rx_packets),
IONIC_LIF_STAT_DESC(rx_bytes),
IONIC_LIF_STAT_DESC(tx_tso),
- IONIC_LIF_STAT_DESC(tx_no_csum),
+ IONIC_LIF_STAT_DESC(tx_tso_bytes),
+ IONIC_LIF_STAT_DESC(tx_csum_none),
IONIC_LIF_STAT_DESC(tx_csum),
IONIC_LIF_STAT_DESC(rx_csum_none),
IONIC_LIF_STAT_DESC(rx_csum_complete),
IONIC_LIF_STAT_DESC(rx_csum_error),
+ IONIC_LIF_STAT_DESC(hw_tx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_dropped),
+ IONIC_LIF_STAT_DESC(hw_rx_over_errors),
+ IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
+ IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+};
+
+static const struct ionic_stat_desc ionic_port_stats_desc[] = {
+ IONIC_PORT_STAT_DESC(frames_rx_ok),
+ IONIC_PORT_STAT_DESC(frames_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_fcs),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_all),
+ IONIC_PORT_STAT_DESC(octets_rx_ok),
+ IONIC_PORT_STAT_DESC(octets_rx_all),
+ IONIC_PORT_STAT_DESC(frames_rx_unicast),
+ IONIC_PORT_STAT_DESC(frames_rx_multicast),
+ IONIC_PORT_STAT_DESC(frames_rx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_rx_pause),
+ IONIC_PORT_STAT_DESC(frames_rx_bad_length),
+ IONIC_PORT_STAT_DESC(frames_rx_undersized),
+ IONIC_PORT_STAT_DESC(frames_rx_oversized),
+ IONIC_PORT_STAT_DESC(frames_rx_fragments),
+ IONIC_PORT_STAT_DESC(frames_rx_jabber),
+ IONIC_PORT_STAT_DESC(frames_rx_pripause),
+ IONIC_PORT_STAT_DESC(frames_rx_stomped_crc),
+ IONIC_PORT_STAT_DESC(frames_rx_too_long),
+ IONIC_PORT_STAT_DESC(frames_rx_vlan_good),
+ IONIC_PORT_STAT_DESC(frames_rx_dropped),
+ IONIC_PORT_STAT_DESC(frames_rx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_64b),
+ IONIC_PORT_STAT_DESC(frames_rx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_rx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_rx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_rx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_rx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_rx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_rx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_rx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_rx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_rx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_ok),
+ IONIC_PORT_STAT_DESC(frames_tx_all),
+ IONIC_PORT_STAT_DESC(frames_tx_bad),
+ IONIC_PORT_STAT_DESC(octets_tx_ok),
+ IONIC_PORT_STAT_DESC(octets_tx_total),
+ IONIC_PORT_STAT_DESC(frames_tx_unicast),
+ IONIC_PORT_STAT_DESC(frames_tx_multicast),
+ IONIC_PORT_STAT_DESC(frames_tx_broadcast),
+ IONIC_PORT_STAT_DESC(frames_tx_pause),
+ IONIC_PORT_STAT_DESC(frames_tx_pripause),
+ IONIC_PORT_STAT_DESC(frames_tx_vlan),
+ IONIC_PORT_STAT_DESC(frames_tx_less_than_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_64b),
+ IONIC_PORT_STAT_DESC(frames_tx_65b_127b),
+ IONIC_PORT_STAT_DESC(frames_tx_128b_255b),
+ IONIC_PORT_STAT_DESC(frames_tx_256b_511b),
+ IONIC_PORT_STAT_DESC(frames_tx_512b_1023b),
+ IONIC_PORT_STAT_DESC(frames_tx_1024b_1518b),
+ IONIC_PORT_STAT_DESC(frames_tx_1519b_2047b),
+ IONIC_PORT_STAT_DESC(frames_tx_2048b_4095b),
+ IONIC_PORT_STAT_DESC(frames_tx_4096b_8191b),
+ IONIC_PORT_STAT_DESC(frames_tx_8192b_9215b),
+ IONIC_PORT_STAT_DESC(frames_tx_other),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_tx_pri_7),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_0),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_1),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_2),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_3),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_4),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_5),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_6),
+ IONIC_PORT_STAT_DESC(frames_rx_pri_7),
+ IONIC_PORT_STAT_DESC(tx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(tx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_0_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_1_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_2_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_3_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_4_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_5_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_6_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pripause_7_1us_count),
+ IONIC_PORT_STAT_DESC(rx_pause_1us_count),
+ IONIC_PORT_STAT_DESC(frames_tx_truncated),
};
static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
@@ -29,6 +127,11 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(dma_map_err),
IONIC_TX_STAT_DESC(linearize),
IONIC_TX_STAT_DESC(frags),
+ IONIC_TX_STAT_DESC(tso),
+ IONIC_TX_STAT_DESC(tso_bytes),
+ IONIC_TX_STAT_DESC(csum_none),
+ IONIC_TX_STAT_DESC(csum),
+ IONIC_TX_STAT_DESC(vlan_inserted),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -40,6 +143,7 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(csum_complete),
IONIC_RX_STAT_DESC(csum_error),
IONIC_RX_STAT_DESC(dropped),
+ IONIC_RX_STAT_DESC(vlan_stripped),
};
static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
@@ -62,6 +166,7 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
+#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
@@ -76,6 +181,7 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
{
struct ionic_tx_stats *tstats;
struct ionic_rx_stats *rstats;
+ struct rtnl_link_stats64 ns;
struct ionic_qcq *txqcq;
struct ionic_qcq *rxqcq;
int q_num;
@@ -89,7 +195,8 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->tx_packets += tstats->pkts;
stats->tx_bytes += tstats->bytes;
stats->tx_tso += tstats->tso;
- stats->tx_no_csum += tstats->no_csum;
+ stats->tx_tso_bytes += tstats->tso_bytes;
+ stats->tx_csum_none += tstats->csum_none;
stats->tx_csum += tstats->csum;
}
@@ -103,6 +210,13 @@ static void ionic_get_lif_stats(struct ionic_lif *lif,
stats->rx_csum_error += rstats->csum_error;
}
}
+
+ ionic_get_stats64(lif->netdev, &ns);
+ stats->hw_tx_dropped = ns.tx_dropped;
+ stats->hw_rx_dropped = ns.rx_dropped;
+ stats->hw_rx_over_errors = ns.rx_over_errors;
+ stats->hw_rx_missed_errors = ns.rx_missed_errors;
+ stats->hw_tx_aborted_errors = ns.tx_aborted_errors;
}
static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
@@ -118,6 +232,9 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
+ /* port stats */
+ total += IONIC_NUM_PORT_STATS;
+
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
@@ -144,6 +261,13 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name);
*buf += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ snprintf(*buf, ETH_GSTRING_LEN,
+ ionic_port_stats_desc[i].name);
+ *buf += ETH_GSTRING_LEN;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s",
@@ -225,6 +349,7 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
+ struct ionic_port_stats *port_stats;
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
struct ionic_tx_stats *txstats;
@@ -238,6 +363,13 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
+ port_stats = &lif->ionic->idev.port_info->stats;
+ for (i = 0; i < IONIC_NUM_PORT_STATS; i++) {
+ **buf = IONIC_READ_STAT_LE64(port_stats,
+ &ionic_port_stats_desc[i]);
+ (*buf)++;
+ }
+
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
txstats = &lif_to_txstats(lif, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index d2c1122a2c6e..3f543512616e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -11,6 +11,9 @@
.offset = IONIC_STAT_TO_OFFSET(type, stat_name) \
}
+#define IONIC_PORT_STAT_DESC(stat_name) \
+ IONIC_STAT_DESC(struct ionic_port_stats, stat_name)
+
#define IONIC_LIF_STAT_DESC(stat_name) \
IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name)
@@ -45,6 +48,9 @@ extern const int ionic_num_stats_grps;
#define IONIC_READ_STAT64(base_ptr, desc_ptr) \
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
+ __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
u64 offset;
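
Illustrative use of the two accessors (the loop index and stat
pointers are hypothetical): LIF software stats are plain CPU-order
u64s, while hardware port counters are little-endian, hence the
_LE64 variant added above:

	val = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]);
	val = IONIC_READ_STAT_LE64(port_stats, &ionic_port_stats_desc[i]);
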
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d233b6e77b1e..b7f900c11834 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,8 +10,10 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
@@ -140,8 +142,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return skb;
}
-static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_rx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -210,10 +214,11 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
(comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(comp->vlan_tci));
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(comp->vlan_tci));
+ stats->vlan_stripped++;
}
if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
@@ -475,7 +480,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
return work_done;
}
-static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->lif->ionic->dev;
@@ -491,7 +497,8 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t
return dma_addr;
}
-static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -507,8 +514,10 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *fra
return dma_addr;
}
-static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
struct ionic_txq_sg_elem *elem = sg_desc->elems;
@@ -852,6 +861,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts += total_pkts;
stats->bytes += total_bytes;
stats->tso++;
+ stats->tso_bytes += total_bytes;
return 0;
@@ -890,9 +900,12 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
if (skb->csum_not_inet)
stats->crc32_csum++;
@@ -927,9 +940,12 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
flags, skb_shinfo(skb)->nr_frags, dma_addr);
desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(skb_headlen(skb));
- desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ if (has_vlan) {
+ desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
+ stats->vlan_inserted++;
+ }
- stats->no_csum++;
+ stats->csum_none++;
return 0;
}
@@ -989,6 +1005,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
+ int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
@@ -997,7 +1014,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
return (skb->len / skb_shinfo(skb)->gso_size) + 1;
/* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
+ if (skb_shinfo(skb)->nr_frags <= sg_elems)
return 1;
/* Too many frags, so linearize */
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fa41bf08a589..66ed39d6f357 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -740,12 +740,6 @@ struct qed_dbg_feature {
u32 dumped_dwords;
};
-struct qed_dbg_params {
- struct qed_dbg_feature features[DBG_FEATURE_NUM];
- u8 engine_for_debug;
- bool print_data;
-};
-
struct qed_dev {
u32 dp_module;
u8 dp_level;
@@ -844,6 +838,9 @@ struct qed_dev {
/* Recovery */
bool recov_in_prog;
+ /* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
/* LLH info */
u8 ppfid_bitmap;
struct qed_llh_info *p_llh_info;
@@ -872,17 +869,18 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
- struct qed_dbg_params dbg_params;
-
#ifdef CONFIG_QED_LL2
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
+ bool print_dbg_data;
+
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
@@ -1020,6 +1018,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1a636bad717d..7b76667acaba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -110,6 +110,7 @@ struct src_ent {
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
+ u32 num_srqs, u32 num_xrc_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
+ p_mgr->xrc_srq_count = num_xrc_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
+
+ return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+}
+
+static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
+{
+ u32 page_size;
+
+ page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
+ return page_size / XRC_SRQ_CXT_SIZE;
+}
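
As a worked example with assumed sizes: a 4 KB TSDM ILT page and a
64-byte rdma_xrc_srq_context would yield 4096 / 64 = 64 XRC SRQ
contexts per page, which becomes the xrc_srq_count used below.
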
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ u32 total_srqs;
+
+ total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;
- return p_mgr->srq_count;
+ return total_srqs;
}
/* set the iids count per protocol */
@@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
}
/* TSDM (SRQ CONTEXT) */
- total = qed_cxt_get_srq_count(p_hwfn);
+ total = qed_cxt_get_total_srq_count(p_hwfn);
if (total) {
p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
@@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params,
u32 num_tasks)
{
- u32 num_cons, num_qps, num_srqs;
+ u32 num_cons, num_qps;
enum protocol_type proto;
- num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
-
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
@@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
}
if (num_cons && num_tasks) {
+ u32 num_srqs, num_xrc_srqs;
+
qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
/* Deliberately passing ROCE for tasks id. This is because
@@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
QED_CXT_ROCE_TID_SEG, 1,
num_tasks, false);
- qed_cxt_set_srq_count(p_hwfn, num_srqs);
+
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
+
+ /* XRC SRQs populate a single ILT page */
+ num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
+
+ qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
} else {
DP_INFO(p_hwfn->cdev,
"RDMA personality used without setting params!\n");
@@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUC_BLK];
break;
case QED_ELEM_SRQ:
+ /* The first ILT page is not used for regular SRQs. Skip it. */
+ iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK];
break;
+ case QED_ELEM_XRC_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = XRC_SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
case QED_ELEM_TASK:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
return rc;
/* Free TSDM CXT */
- rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
- qed_cxt_get_srq_count(p_hwfn));
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
+ p_hwfn->p_cxt_mngr->xrc_srq_count);
+
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
+ p_hwfn->p_cxt_mngr->xrc_srq_count,
+ p_hwfn->p_cxt_mngr->srq_count);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c4e815f6cabd..ce08ae8d8498 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type {
QED_ELEM_CXT,
QED_ELEM_SRQ,
- QED_ELEM_TASK
+ QED_ELEM_TASK,
+ QED_ELEM_XRC_SRQ,
};
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type);
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
@@ -358,6 +358,7 @@ struct qed_cxt_mngr {
/* total number of SRQ's for this hwfn */
u32 srq_count;
+ u32 xrc_srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
@@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client);
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f4eebaabb6d0..57a0dab88431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7453,7 +7453,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 text_size_bytes, null_char_pos, i;
enum dbg_status rc;
char *text_buf;
@@ -7502,7 +7502,7 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
text_buf[i] = '\n';
/* Dump printable feature to log */
- if (p_hwfn->cdev->dbg_params.print_data)
+ if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
/* Free the old dump_buf and point the dump_buf to the newly allocated
@@ -7523,7 +7523,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
- &p_hwfn->cdev->dbg_params.features[feature_idx];
+ &p_hwfn->cdev->dbg_features[feature_idx];
u32 buf_size_dwords;
enum dbg_status rc;
@@ -7648,7 +7648,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
enum qed_nvm_images image_id)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 len_rounded, i;
__be32 val;
int rc;
@@ -7780,7 +7780,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
@@ -8000,7 +8000,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
u8 cur_engine, org_engine;
@@ -8059,9 +8059,9 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature =
- &cdev->dbg_params.features[feature];
+ &cdev->dbg_features[feature];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8084,7 +8084,7 @@ int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
DP_VERBOSE(cdev, QED_MSG_DEBUG,
"copying debugfs feature to external buffer\n");
memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
- *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
4;
out:
@@ -8095,7 +8095,7 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
@@ -8120,14 +8120,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
- return cdev->dbg_params.engine_for_debug;
+ return cdev->engine_for_debug;
}
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
engine_number);
- cdev->dbg_params.engine_for_debug = engine_number;
+ cdev->engine_for_debug = engine_number;
}
void qed_dbg_pf_init(struct qed_dev *cdev)
@@ -8146,7 +8146,7 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
}
/* Set the hwfn to be 0 as default */
- cdev->dbg_params.engine_for_debug = 0;
+ cdev->engine_for_debug = 0;
}
void qed_dbg_pf_exit(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 38a65b984e47..1eebf30fa798 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1972,7 +1972,7 @@ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
return 0;
if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
- p_hwfn->hw_info.multi_tc_roce_en = 0;
+ p_hwfn->hw_info.multi_tc_roce_en = false;
DP_NOTICE(p_hwfn,
"multi-tc roce was disabled to reduce requested amount of pqs\n");
if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
@@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* EQ */
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
enum protocol_type rdma_proto;
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
@@ -2279,7 +2280,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
rdma_proto,
NULL) * 2;
- n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ /* EQ should be able to get events from all SRQs
+ * at the same time
+ */
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
@@ -3085,7 +3089,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->rel_pf_id, false);
if (rc) {
- DP_NOTICE(p_hwfn, "Final cleanup failed\n");
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ QED_HW_ERR_RAMROD_FAIL,
+ "Final cleanup failed\n");
goto load_err;
}
}
@@ -4392,7 +4398,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
}
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
- p_hwfn->hw_info.multi_tc_roce_en = 1;
+ p_hwfn->hw_info.multi_tc_roce_en = true;
p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
p_hwfn->hw_info.num_active_tc = 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 4597015b8bff..f00460d00cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12400,6 +12400,13 @@ struct load_rsp_stc {
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -12488,10 +12495,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
+#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
+
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
@@ -12517,6 +12528,21 @@ struct public_drv_mb {
#define RESOURCE_DUMP 0
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
+#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
+
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -12626,6 +12652,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xFF
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
@@ -12678,6 +12715,14 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
+#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
+#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
+
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+
u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12742,9 +12787,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_GET_FCOE_STATS,
MFW_DRV_MSG_GET_ISCSI_STATS,
MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_BW_UPDATE10,
+ MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_BW_UPDATE11,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_OEM_CFG_UPDATE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 4ab8cfaf63d1..5fa251489536 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -762,9 +762,10 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
dst_type,
length_cur);
if (qed_status) {
- DP_NOTICE(p_hwfn,
- "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
- qed_status, src_addr, dst_addr, length_cur);
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status, src_addr,
+ dst_addr, length_cur);
break;
}
}
@@ -837,6 +838,41 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...)
+{
+ char buf[QED_HW_ERR_MAX_STR_SIZE];
+ va_list vl;
+ int len;
+
+ if (fmt) {
+ va_start(vl, fmt);
+ len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
+ len = QED_HW_ERR_MAX_STR_SIZE - 1;
+
+ DP_NOTICE(p_hwfn, "%s", buf);
+ }
+
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->cdev->recov_in_prog &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_DRV,
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ qed_hw_error_occurred(p_hwfn, err_type);
+
+ if (fmt)
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
+}
+
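Worth noting in the helper above: passing a NULL fmt is a supported mode — the notifier still escalates through qed_hw_error_occurred() but skips both the local DP_NOTICE and the raw debug-data send to the MFW. A minimal sketch of that path, assuming the caller already holds a PTT:

	/* No message: notify only, nothing is logged or sent to the MFW */
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_MFW_RESP_FAIL, NULL);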
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 505e94db939d..f5b109b04b66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -315,4 +315,19 @@ int qed_init_fw_data(struct qed_dev *cdev,
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase);
+#define QED_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * @brief qed_hw_err_notify - Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param err_type
+ * @param fmt - debug data buffer to send to the MFW
+ * @param ... - buffer format args
+ */
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, char *fmt, ...);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9f5113639eaf..b7b974f0ef21 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -96,6 +96,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT BIT(23)
+#define ATTENTION_CLEAR_ENABLE BIT(28)
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -363,6 +364,21 @@ static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
+static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
+{
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
+ "FW assertion!\n");
+
+ return -EINVAL;
+}
+
+static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return 0;
+}
+
#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
@@ -605,13 +621,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 4 */
- {"General Attention 32", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 32", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
+ MAX_BLOCK_ID},
{"General Attention %d",
(2 << ATTENTION_LENGTH_SHIFT) |
(33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
- {"General Attention 35", ATTENTION_SINGLE,
- NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
+ MAX_BLOCK_ID},
{"NWS Parity",
ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
@@ -927,9 +945,12 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
qed_int_attn_print(p_hwfn, p_aeu->block_index,
ATTN_TYPE_INTERRUPT, !b_fatal);
-
- /* If the attention is benign, no need to prevent it */
- if (!rc)
+ /* Reach assertion if attention is fatal */
+ if (b_fatal)
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
+ "`%s': Fatal attention\n",
+ p_bit_name);
+ else /* If the attention is benign, no need to prevent it */
goto out;
/* Prevent this Attention from being asserted in the future */
@@ -2349,6 +2370,11 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
cdev->hwfns[i].b_int_requested = false;
}
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
+{
+ cdev->attn_clr_en = clr_enable;
+}
+
int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 9ad568d93ae6..e09db3386367 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -191,6 +191,17 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
+ * @brief qed_int_attn_clr_enable - sets whether the general behavior
+ * prevents attentions from being reasserted, or follows the
+ * attributes of each specific attention.
+ *
+ * @param cdev
+ * @param clr_enable
+ *
+ */
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
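A minimal sketch of how an upper-layer driver could flip this behavior through the exported common ops (the netdev/edev plumbing here is assumed, not shown in this patch):

	struct qede_dev *edev = netdev_priv(ndev);	/* hypothetical caller context */

	/* Globally prevent attentions from being reasserted */
	edev->ops->common->attn_clr_enable(edev->cdev, true);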
+/**
* @brief - Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 037e5978787e..4afd8572ada6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2331,7 +2331,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
cdev->ll2->cb_cookie = cookie;
}
-struct qed_ll2_cbs ll2_cbs = {
+static struct qed_ll2_cbs ll2_cbs = {
.rx_comp_cb = &qed_ll2b_complete_rx_packet,
.rx_release_cb = &qed_ll2b_release_rx_packet,
.tx_comp_cb = &qed_ll2b_complete_tx_packet,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 96356e897c80..83e798d4eebb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -49,6 +49,7 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
+#include <linux/aer.h>
#include "qed.h"
#include "qed_sriov.h"
@@ -129,6 +130,8 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
+ pci_disable_pcie_error_reporting(pdev);
+
if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
@@ -231,6 +234,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
return -ENOMEM;
}
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Failed to configure PCIe AER [%d]\n", rc);
+
return 0;
err2:
@@ -2459,6 +2468,39 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
ops->schedule_recovery_handler(cookie);
}
+char *qed_hw_err_type_descr[] = {
+ [QED_HW_ERR_FAN_FAIL] = "Fan Failure",
+ [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
+ [QED_HW_ERR_HW_ATTN] = "HW Attention",
+ [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
+ [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
+ [QED_HW_ERR_FW_ASSERT] = "FW Assertion",
+ [QED_HW_ERR_LAST] = "Unknown",
+};
+
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ char *err_str;
+
+ if (err_type > QED_HW_ERR_LAST)
+ err_type = QED_HW_ERR_LAST;
+ err_str = qed_hw_err_type_descr[err_type];
+
+ DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
+
+ /* Call the HW error handler of the protocol driver.
+ * If it is not available, fall back to a minimal handling that
+ * prevents HW attentions from being reasserted.
+ */
+ if (ops && ops->schedule_hw_err_handler)
+ ops->schedule_hw_err_handler(cookie, err_type);
+ else
+ qed_int_attn_clr_enable(p_hwfn->cdev, true);
+}
+
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
void *handle)
{
@@ -2680,6 +2722,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.set_led = &qed_set_led,
.recovery_process = &qed_recovery_process,
.recovery_prolog = &qed_recovery_prolog,
+ .attn_clr_enable = &qed_int_attn_clr_enable,
.update_drv_state = &qed_update_drv_state,
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 280527cc0578..9624616806e7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -575,6 +575,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
return -EAGAIN;
}
@@ -1704,6 +1706,127 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param);
}
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* A single notification should be sent to upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
+}
+
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct public_func shmem_info;
@@ -1850,6 +1973,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
break;
@@ -3819,3 +3948,127 @@ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
DRV_MSG_CODE_SET_NVM_CFG_OPTION,
mb_param, &resp, &param, len, (u32 *)p_buf);
}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
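As a worked example of the chunking these defines imply (assuming MCP_DRV_NVM_BUF_LEN is 32 bytes — that define is not shown in this patch): each mailbox command carries a 4-byte header plus up to 28 payload bytes, so a 100-byte buffer goes out as three full 28-byte chunks followed by a final 16-byte chunk, i.e. four DEBUG_DATA_SEND commands in total.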
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
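To make the header layout concrete, packing PF 0, type RAW (0), flags FIRST | LAST (0x3) and sequence number 1 with SET_MFW_FIELD gives 0x00300001:

	u32 hdr = 0;

	SET_MFW_FIELD(hdr, QED_MCP_DBG_DATA_HDR_SN, 1);		/* [11:0]  -> 0x00000001 */
	SET_MFW_FIELD(hdr, QED_MCP_DBG_DATA_HDR_TYPE, 0);	/* [19:12] -> 0 */
	SET_MFW_FIELD(hdr, QED_MCP_DBG_DATA_HDR_FLAGS, 0x3);	/* [27:20] -> 0x00300000 */
	SET_MFW_FIELD(hdr, QED_MCP_DBG_DATA_HDR_PF, 0);		/* [31:28] -> 0 */
	/* hdr == 0x00300001 */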
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+ /* Casting the remaining size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 9c4c2763de8d..5750b4c5ef63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -685,6 +685,18 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Send raw debug data to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_buf - raw debug data buffer
+ * @param size - buffer size
+ */
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -731,6 +743,9 @@ struct qed_mcp_info {
/* Capabilties negotiated with the MFW */
u32 capabilities;
+
+ /* S/N for debug data mailbox commands */
+ atomic_t dbg_data_seq;
};
struct qed_mcp_mb_params {
@@ -1001,6 +1016,19 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return 0 upon success.
+ */
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
/**
* @brief - Sets the MFW's max value for the given resource
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 38b1f402f7ed..98455f698f53 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -212,13 +212,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
goto free_rdma_port;
}
+ /* Allocate bit map for XRC Domains */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
+ QED_RDMA_MAX_XRCDS, "XRCD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrcd_map,rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
/* Allocate DPI bitmap */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
p_hwfn->dpi_count, "DPI");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate DPI bitmap, rc = %d\n", rc);
- goto free_pd_map;
+ goto free_xrcd_map;
}
/* Allocate bitmap for cq's. The maximum number of CQs is bound to
@@ -271,14 +280,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
goto free_cid_map;
}
+ /* The first SRQ follows the last XRC SRQ. This means that the
+ * SRQ IDs start from an offset equals to max_xrc_srqs.
+ */
+ p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
+ rc = qed_rdma_bmap_alloc(p_hwfn,
+ &p_rdma_info->xrc_srq_map,
+ p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
+ goto free_real_cid_map;
+ }
+
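Illustrating the resulting ID split (with an assumed xrc_srq_count of 64, which is not a value from this patch): XRC SRQs get device IDs 0..63, regular SRQs start at srq_id_offset == 64, and the create/destroy paths further below translate between bitmap IDs and device IDs by adding or subtracting that offset:

	/* Assumed xrc_srq_count == 64, for illustration only */
	srq_id = (u16)returned_id + 64;			/* regular SRQ: bitmap id 0 -> device id 64 */
	qed_bmap_release_id(p_hwfn, bmap, srq_id - 64);	/* destroy maps back to the bitmap id */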
/* Allocate bitmap for srqs */
- p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+ p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
p_rdma_info->num_srqs, "SRQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate srq bitmap, rc = %d\n", rc);
- goto free_real_cid_map;
+ goto free_xrc_srq_map;
}
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
@@ -292,6 +314,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
free_srq_map:
kfree(p_rdma_info->srq_map.bitmap);
+free_xrc_srq_map:
+ kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
@@ -304,6 +328,8 @@ free_cq_map:
kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
kfree(p_rdma_info->dpi_map.bitmap);
+free_xrcd_map:
+ kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
@@ -377,6 +403,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
@@ -612,7 +639,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
QED_RDMA_CNQ_RAM);
p_params_header->num_cnqs = params->desired_cnq;
-
+ p_params_header->first_reg_srq_id =
+ cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
+ p_params_header->reg_srq_base_addr =
+ cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
p_params_header->cq_ring_mode = 1;
else
@@ -983,6 +1013,41 @@ static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->xrcd_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
+ return rc;
+ }
+
+ *xrcd_id = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
@@ -1306,11 +1371,14 @@ qed_rdma_create_qp(void *rdma_cxt,
qp->resp_offloaded = false;
qp->e2e_flow_control_en = qp->use_srq ? false : true;
qp->stats_queue = in_params->stats_queue;
+ qp->qp_type = in_params->qp_type;
+ qp->xrcd_id = in_params->xrcd_id;
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
qp->qpid = qp->icid;
} else {
+ qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
qp->qpid = ((0xFF << 16) | qp->icid);
}
@@ -1418,6 +1486,18 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
qp->cur_state);
}
+ switch (qp->qp_type) {
+ case QED_RDMA_QP_TYPE_XRC_INI:
+ qp->has_req = 1;
+ break;
+ case QED_RDMA_QP_TYPE_XRC_TGT:
+ qp->has_resp = 1;
+ break;
+ default:
+ qp->has_req = 1;
+ qp->has_resp = 1;
+ }
+
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
enum qed_iwarp_qp_state new_state =
qed_roce2iwarp_state(qp->cur_state);
@@ -1657,6 +1737,15 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
return QED_AFFIN_HWFN(cdev);
}
+static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
+ bool is_xrc)
+{
+ if (is_xrc)
+ return &p_hwfn->p_rdma_info->xrc_srq_map;
+
+ return &p_hwfn->p_rdma_info->srq_map;
+}
+
static int qed_rdma_modify_srq(void *rdma_cxt,
struct qed_rdma_modify_srq_in_params *in_params)
{
@@ -1686,8 +1775,8 @@ static int qed_rdma_modify_srq(void *rdma_cxt,
if (rc)
return rc;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
- in_params->srq_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
return rc;
}
@@ -1702,6 +1791,7 @@ qed_rdma_destroy_srq(void *rdma_cxt,
struct qed_spq_entry *p_ent;
struct qed_bmap *bmap;
u16 opaque_fid;
+ u16 offset;
int rc;
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1723,14 +1813,16 @@ qed_rdma_destroy_srq(void *rdma_cxt,
if (rc)
return rc;
- bmap = &p_hwfn->p_rdma_info->srq_map;
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+ qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
- in_params->srq_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
return rc;
}
@@ -1748,24 +1840,26 @@ qed_rdma_create_srq(void *rdma_cxt,
u16 opaque_fid, srq_id;
struct qed_bmap *bmap;
u32 returned_id;
+ u16 offset;
int rc;
- bmap = &p_hwfn->p_rdma_info->srq_map;
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
if (rc) {
- DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+ DP_NOTICE(p_hwfn,
+ "failed to allocate xrc/srq id (is_xrc=%u)\n",
+ in_params->is_xrc);
return rc;
}
- elem_type = QED_ELEM_SRQ;
+ elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
if (rc)
goto err;
- /* returned id is no greater than u16 */
- srq_id = (u16)returned_id;
+
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1782,20 +1876,34 @@ qed_rdma_create_srq(void *rdma_cxt,
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
- p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
p_ramrod->page_size = cpu_to_le16(in_params->page_size);
DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
+ srq_id = (u16)returned_id + offset;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+ if (in_params->is_xrc) {
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
+ in_params->reserved_key_en);
+ p_ramrod->xrc_srq_cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ in_params->cq_cid);
+ p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
+ }
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err;
out_params->srq_id = srq_id;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "SRQ created Id = %x\n", out_params->srq_id);
-
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "XRC/SRQ created Id = %x (is_xrc=%u)\n",
+ out_params->srq_id, in_params->is_xrc);
return rc;
err:
@@ -1961,6 +2069,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
.rdma_alloc_pd = &qed_rdma_alloc_pd,
.rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
+ .rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
.rdma_create_cq = &qed_rdma_create_cq,
.rdma_destroy_cq = &qed_rdma_destroy_cq,
.rdma_create_qp = &qed_rdma_create_qp,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 3689fe3e5935..3898cae61e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -63,6 +63,11 @@
#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
+ * SRQs is much smaller so there's no need to have that many domains.
+ */
+#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS))
+
enum qed_rdma_toggle_bit {
QED_RDMA_TOGGLE_BIT_CLEAR = 0,
QED_RDMA_TOGGLE_BIT_SET = 1
@@ -81,9 +86,11 @@ struct qed_rdma_info {
struct qed_bmap cq_map;
struct qed_bmap pd_map;
+ struct qed_bmap xrcd_map;
struct qed_bmap tid_map;
struct qed_bmap qp_map;
struct qed_bmap srq_map;
+ struct qed_bmap xrc_srq_map;
struct qed_bmap cid_map;
struct qed_bmap tcp_cid_map;
struct qed_bmap real_cid_map;
@@ -111,6 +118,7 @@ struct qed_rdma_qp {
u32 qpid;
u16 icid;
enum qed_roce_qp_state cur_state;
+ enum qed_rdma_qp_type qp_type;
enum qed_iwarp_qp_state iwarp_state;
bool use_srq;
bool signal_all;
@@ -153,18 +161,21 @@ struct qed_rdma_qp {
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
+ bool has_req;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id;
u16 rq_num_pages;
+ u16 xrcd_id;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u32 cq_prod;
+ bool has_resp;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -172,8 +183,17 @@ struct qed_rdma_qp {
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
struct qed_iwarp_ep *ep;
+ u8 edpm_mode;
};
+static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
+{
+ if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
+ qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
+ return true;
+
+ return false;
+}
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 37e70562a964..4566815f7b87 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -254,6 +254,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
int rc;
u8 tc;
+ if (!qp->has_resp)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for IRQ */
@@ -315,6 +318,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
qp->min_rnr_nak_timer);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
p_ramrod->max_ird = qp->max_rd_atomic_resp;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -335,6 +342,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
+ p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
tc = qed_roce_get_qp_tc(p_hwfn, qp);
regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
@@ -395,6 +403,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
int rc;
u8 tc;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for ORQ */
@@ -444,6 +455,13 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
qp->rnr_retry_cnt);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
+ SET_FIELD(p_ramrod->flags2,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode);
+
p_ramrod->max_ord = qp->max_rd_atomic_req;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -517,6 +535,9 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent;
int rc;
+ if (!qp->has_resp)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !qp->resp_offloaded)
@@ -611,6 +632,9 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent;
int rc;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !(qp->req_offloaded))
@@ -705,6 +729,11 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
dma_addr_t ramrod_res_phys;
int rc;
+ if (!qp->has_resp) {
+ *cq_prod = 0;
+ return 0;
+ }
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
*cq_prod = qp->cq_prod;
@@ -736,9 +765,9 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
- p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
- &ramrod_res_phys, GFP_KERNEL);
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
rc = -ENOMEM;
@@ -785,6 +814,9 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
dma_addr_t ramrod_res_phys;
int rc = -ENOMEM;
+ if (!qp->has_req)
+ return 0;
+
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (!qp->req_offloaded)
@@ -872,10 +904,10 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query responder ramrod to FW to get RQ-PSN and state */
- p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(*p_resp_ramrod_res),
- &resp_ramrod_res_phys, GFP_KERNEL);
+ p_resp_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
if (!p_resp_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed query qp failed: cannot allocate memory (ramrod)\n");
@@ -920,8 +952,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
}
/* Send a query requester ramrod to FW to get SQ-PSN and state */
- p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_req_ramrod_res),
&req_ramrod_res_phys,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f5f3c03b9dd2..790c28d696a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -160,12 +160,16 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
return 0;
}
err:
- DP_NOTICE(p_hwfn,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
- le32_to_cpu(p_ent->elem.hdr.cid),
- p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- le16_to_cpu(p_ent->elem.hdr.echo));
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
+ "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ qed_ptt_release(p_hwfn, p_ptt);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 368e88565783..aabeaf03135e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,6 +32,7 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -40,9 +41,12 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) ((cdev)->b_is_vf)
-#define IS_PF(cdev) (!((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) (is_kdump_kernel() ? \
+ (0) : ((cdev)->b_is_vf))
+#define IS_PF(cdev) (is_kdump_kernel() ? \
+ (1) : !((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
+ (0) : !!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 234c6f30effb..8857da1208d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -278,6 +278,14 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+
+ unsigned long err_flags;
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
struct qede_dump_info dump_info;
};
@@ -485,11 +493,15 @@ struct qede_fastpath {
#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
+#define QEDE_SP_AER 7
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-#define QEDE_SP_ARFS_CONFIG 4
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif
@@ -521,7 +533,6 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
-void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
@@ -574,12 +585,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
#define QEDE_MIN_PKT_LEN 64
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 812c7766e096..24cc68391ac4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -190,12 +190,14 @@ static const struct {
enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
+ "Recover on error",
};
enum qede_ethtool_tests {
@@ -417,9 +419,30 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->dev_info.common.smart_an)
flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
return flags;
}
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
+
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
+
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+
+ return 0;
+}
+
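With the flag wired into .set_priv_flags, toggling recovery-on-error from userspace becomes a standard ethtool private-flags operation (the interface name is a placeholder):

	# flag string matches qede_private_arr above
	ethtool --set-priv-flags eth0 "Recover on error" on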
struct qede_link_mode_mapping {
u32 qed_link_mode;
u32 ethtool_link_mode;
@@ -2098,6 +2121,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index c6c20776b474..7598ebe0962a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1066,6 +1066,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.rxq = &rxq->xdp_rxq;
+ xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 34fa3917eb33..b2d154258b07 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
@@ -60,6 +61,7 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
+#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -124,6 +126,8 @@ static const struct pci_device_id qede_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
#define TX_TIMEOUT (5 * HZ)
@@ -135,10 +139,12 @@ static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -203,6 +209,10 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
}
#endif
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
@@ -212,6 +222,7 @@ static struct pci_driver qede_pci_driver = {
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
+ .err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
@@ -221,6 +232,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
#endif
.link_update = qede_link_update,
.schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -527,6 +539,51 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
+static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ DP_NOTICE(edev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl),
+ jiffies);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_tx_queue *txq;
+ int cos;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
+ return;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &edev->fp_array[txqueue].txq[cos];
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, txq);
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
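For reference, ndo_tx_timeout is driven by the net core's transmit watchdog, which is also where the txqueue index comes from. A simplified sketch of the calling side (assumption: modelled on dev_watchdog() in net/sched/sch_generic.c):

	/* for each Tx queue i of a running device: */
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	if (netif_xmit_stopped(txq) &&
	    time_after(jiffies, txq->trans_start + dev->watchdog_timeo))
		dev->netdev_ops->ndo_tx_timeout(dev, i);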
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -614,6 +671,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
.ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
@@ -707,8 +765,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->dp_module = dp_module;
edev->dp_level = dp_level;
edev->ops = qed_ops;
- edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
- edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
info->num_queues, info->num_queues);
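Shrinking both rings under is_kdump_kernel() keeps the capture kernel's small memory reservation from being exhausted by ring buffers. The *_KDUMP_MIN constants are introduced in qede.h by the same series; a sketch of the intended shape (the exact values here are illustrative, not taken from this excerpt):

	/* qede.h (sketch) */
	#define NUM_RX_BDS_KDUMP_MIN	63	/* vs. roughly 1k BDs by default */
	#define NUM_TX_BDS_KDUMP_MIN	63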
@@ -974,7 +1038,8 @@ static void qede_sp_task(struct work_struct *work)
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
*/
- qede_sriov_configure(edev->pdev, 0);
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
#endif
qede_lock(edev);
qede_recovery_handler(edev);
@@ -993,7 +1058,20 @@ static void qede_sp_task(struct work_struct *work)
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
}
static void qede_update_pf_params(struct qed_dev *cdev)
@@ -1187,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
- is_vf = true;
+ is_vf = is_kdump_kernel() ? false : true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
@@ -1398,7 +1476,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts ,
+ /* Segment size to split a page in multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
if (!edev->xdp_prog) {
@@ -1694,7 +1772,7 @@ static void qede_init_fp(struct qede_dev *edev)
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -2482,6 +2560,100 @@ err:
qede_recovery_failed(edev);
}
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Trigger a recovery process.
+ * This is placed in the sleep requiring section just to make
+ * sure it is the last one, and that all the other operations
+ * were completed.
+ */
+ if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
+ edev->ops->common->recovery_process(cdev);
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
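The fallthrough; statement above replaces the old /* fall through */ comment convention and lets the compiler verify the intent. A sketch of its definition, assuming the include/linux/compiler_attributes.h variant:

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif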
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
+}
+
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -2579,3 +2751,49 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}
+
+/**
+ * qede_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
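Returning PCI_ERS_RESULT_CAN_RECOVER signals the AER core that no slot reset is required while the deferred sp_task runs the driver's own recovery. For reference, an abbreviated sketch of the result codes from include/linux/pci.h (the real enum assigns explicit non-zero values and has a further member for the no-AER-driver case):

	enum pci_ers_result {
		PCI_ERS_RESULT_NONE = 1,	/* no result / not supported */
		PCI_ERS_RESULT_CAN_RECOVER,	/* recoverable without slot reset */
		PCI_ERS_RESULT_NEED_RESET,	/* driver wants a slot reset */
		PCI_ERS_RESULT_DISCONNECT,	/* device is unrecoverable */
		PCI_ERS_RESULT_RECOVERED,	/* device fully recovered */
	};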
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 134611aa2c9a..d838774af5a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1880,12 +1880,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
-static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
- ulong off, u32 data)
-{
- return adapter->ahw->hw_ops->write_reg(adapter, off, data);
-}
-
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
u8 *mac, u8 function)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f7c2f32237cb..7adbb03cb931 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1582,10 +1582,10 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
!adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = 1;
+ adapter->drv_mac_learn = true;
adapter->rx_mac_learn = true;
} else {
- adapter->drv_mac_learn = 0;
+ adapter->drv_mac_learn = false;
adapter->rx_mac_learn = false;
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 251d4ac4af02..117188e3c7de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1431,8 +1431,9 @@ error:
}
/* Transmit the packet using specified transmit queue */
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb)
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb)
{
struct emac_tpd tpd;
u32 prod_idx;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index ae08bdd9046c..920123eb8ace 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -227,8 +227,9 @@ void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
int *num_pkts, int max_pkts);
-int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
- struct sk_buff *skb);
+netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ struct emac_tx_queue *tx_q,
+ struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 18b0c7a2d6dc..20b1b43a0e39 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -115,7 +115,8 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
}
/* Transmit the packet */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
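Switching the prototypes from int to netdev_tx_t lets sparse flag drivers that leak errno values out of the xmit path, where only the dedicated return codes are legal. A sketch of the type, assuming the definition in include/linux/netdevice.h:

	enum netdev_tx {
		__NETDEV_TX_MIN = INT_MIN,	/* make sure enum is signed */
		NETDEV_TX_OK	= 0x00,		/* driver consumed the skb */
		NETDEV_TX_BUSY	= 0x10,		/* driver busy, core requeues */
	};
	typedef enum netdev_tx netdev_tx_t;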
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 60d342f82fb3..e291e6ac40cb 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -2054,10 +2054,9 @@ static void cp_remove_one (struct pci_dev *pdev)
free_netdev(dev);
}
-#ifdef CONFIG_PM
-static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cp_suspend(struct device *device)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -2075,16 +2074,14 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irqrestore (&cp->lock, flags);
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ device_set_wakeup_enable(device, cp->wol_enabled);
return 0;
}
-static int cp_resume (struct pci_dev *pdev)
+static int __maybe_unused cp_resume(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -2093,10 +2090,6 @@ static int cp_resume (struct pci_dev *pdev)
netif_device_attach (dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
@@ -2111,7 +2104,6 @@ static int cp_resume (struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
static const struct pci_device_id cp_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
@@ -2120,15 +2112,14 @@ static const struct pci_device_id cp_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+static SIMPLE_DEV_PM_OPS(cp_pm_ops, cp_suspend, cp_resume);
+
static struct pci_driver cp_driver = {
.name = DRV_NAME,
.id_table = cp_pci_tbl,
.probe = cp_init_one,
.remove = cp_remove_one,
-#ifdef CONFIG_PM
- .resume = cp_resume,
- .suspend = cp_suspend,
-#endif
+ .driver.pm = &cp_pm_ops,
};
module_pci_driver(cp_driver);
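Moving from the legacy PCI .suspend/.resume hooks to dev_pm_ops lets the PCI core take over pci_save_state() and the power-state transitions itself, which is why those calls disappear from cp_suspend()/cp_resume(). A simplified sketch of what the macro expands to, assuming the include/linux/pm.h definition:

	/* One callback pair covers both system suspend/resume and the
	 * hibernation freeze/thaw/poweroff/restore slots.
	 */
	#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn)	\
	const struct dev_pm_ops name = {			\
		SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)	\
	}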
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 5caeb8368eab..227139d42227 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2603,17 +2603,13 @@ static void rtl8139_set_rx_mode (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
}
-#ifdef CONFIG_PM
-
-static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused rtl8139_suspend(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
- pci_save_state (pdev);
-
if (!netif_running (dev))
return 0;
@@ -2631,38 +2627,30 @@ static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irqrestore (&tp->lock, flags);
- pci_set_power_state (pdev, PCI_D3hot);
-
return 0;
}
-
-static int rtl8139_resume (struct pci_dev *pdev)
+static int __maybe_unused rtl8139_resume(struct device *device)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = dev_get_drvdata(device);
- pci_restore_state (pdev);
if (!netif_running (dev))
return 0;
- pci_set_power_state (pdev, PCI_D0);
+
rtl8139_init_ring (dev);
rtl8139_hw_start (dev);
netif_device_attach (dev);
return 0;
}
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume);
static struct pci_driver rtl8139_pci_driver = {
.name = DRV_NAME,
.id_table = rtl8139_pci_tbl,
.probe = rtl8139_init_one,
.remove = rtl8139_remove_one,
-#ifdef CONFIG_PM
- .suspend = rtl8139_suspend,
- .resume = rtl8139_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &rtl8139_pm_ops,
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c51b48dc3639..4d2ec9742cee 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -19,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
-#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
@@ -27,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
+#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -58,9 +57,6 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
-#define R8169_MSG_DEFAULT \
- (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
#define MC_FILTER_LIMIT 32
@@ -75,6 +71,8 @@
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define OCP_STD_PHY_BASE 0xa400
+
#define RTL_CFG_NO_GBIT 1
/* write/read MMIO register */
@@ -85,10 +83,10 @@
#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
-#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
-#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
-#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
-#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
+#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
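The new definitions reserve room for a VLAN-tagged header plus the trailing FCS instead of the bare Ethernet header plus two bytes. A worked comparison for the 9K case, assuming VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:

	/* old: 9 * 1024 - ETH_HLEN(14) - 2       = 9200 bytes */
	/* new: 9 * 1024 - VLAN_ETH_HLEN(18) - 4  = 9194 bytes */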
static const struct {
const char *name;
@@ -176,10 +174,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static struct {
- u32 msg_enable;
-} debug = { -1 };
-
enum rtl_registers {
MAC0 = 0, /* Ethernet hardware address. */
MAC4 = 4,
@@ -227,10 +221,13 @@ enum rtl_registers {
CPlusCmd = 0xe0,
IntrMitigate = 0xe2,
-#define RTL_COALESCE_MASK 0x0f
-#define RTL_COALESCE_SHIFT 4
-#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
-#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
+#define RTL_COALESCE_TX_USECS GENMASK(15, 12)
+#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8)
+#define RTL_COALESCE_RX_USECS GENMASK(7, 4)
+#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0)
+
+#define RTL_COALESCE_T_MAX 0x0fU
+#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4)
RxDescAddrLow = 0xe4,
RxDescAddrHigh = 0xe8,
@@ -386,10 +383,12 @@ enum rtl_register_content {
/* rx_mode_bits */
AcceptErr = 0x20,
AcceptRunt = 0x10,
+#define RX_CONFIG_ACCEPT_ERR_MASK 0x30
AcceptBroadcast = 0x08,
AcceptMulticast = 0x04,
AcceptMyPhys = 0x02,
AcceptAllPhys = 0x01,
+#define RX_CONFIG_ACCEPT_OK_MASK 0x0f
#define RX_CONFIG_ACCEPT_MASK 0x3f
/* TxConfigBits */
@@ -596,7 +595,6 @@ struct rtl8169_private {
struct net_device *dev;
struct phy_device *phydev;
struct napi_struct napi;
- u32 msg_enable;
enum mac_version mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
@@ -638,8 +636,6 @@ typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
-module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_SOFTDEP("pre: realtek");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_8168D_1);
@@ -727,53 +723,35 @@ struct rtl_cond {
const char *msg;
};
-static void rtl_udelay(unsigned int d)
-{
- udelay(d);
-}
-
static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
- void (*delay)(unsigned int), unsigned int d, int n,
- bool high)
+ unsigned long usecs, int n, bool high)
{
int i;
for (i = 0; i < n; i++) {
if (c->check(tp) == high)
return true;
- delay(d);
+ fsleep(usecs);
}
- netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
- c->msg, !high, n, d);
- return false;
-}
-static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
-}
-
-static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
-{
- return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
+ c->msg, !high, n, usecs);
+ return false;
}
-static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, true);
+ return rtl_loop_wait(tp, c, d, n, true);
}
-static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned int d, int n)
+static bool rtl_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
{
- return rtl_loop_wait(tp, c, msleep, d, n, false);
+ return rtl_loop_wait(tp, c, d, n, false);
}
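The two remaining wrappers can share one implementation because fsleep() selects an appropriate delay primitive from the duration alone; a sketch of its definition, assuming the include/linux/delay.h helper:

	static inline void fsleep(unsigned long usecs)
	{
		if (usecs <= 10)
			udelay(usecs);		/* too short to schedule */
		else if (usecs <= 20000)
			usleep_range(usecs, 2 * usecs);
		else
			msleep(DIV_ROUND_UP(usecs, 1000));
	}

This is also why the former msleep-based callers below now pass microseconds, e.g. 50000 and 10000 instead of 50 and 10.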
#define DECLARE_RTL_COND(name) \
@@ -789,7 +767,8 @@ static bool name ## _check(struct rtl8169_private *tp)
static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
if (reg & 0xffff0001) {
- netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ if (net_ratelimit())
+ netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
return true;
}
return false;
@@ -807,7 +786,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
- rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+ rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -817,7 +796,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
RTL_W32(tp, GPHY_OCP, reg << 15);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
@@ -847,8 +826,6 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
-#define OCP_STD_PHY_BASE 0xa400
-
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -897,7 +874,7 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
- rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
+ rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -911,7 +888,7 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
- value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
/*
@@ -934,7 +911,7 @@ static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -951,7 +928,7 @@ static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(tp, EPHY_RXER_NUM, 0);
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
}
@@ -1037,7 +1014,7 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
udelay(10);
}
@@ -1046,7 +1023,7 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
@@ -1072,7 +1049,7 @@ static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
r8168fp_adjust_ocp_cmd(tp, &cmd, type);
RTL_W32(tp, ERIAR, cmd);
- rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
@@ -1088,7 +1065,7 @@ static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
r8168fp_adjust_ocp_cmd(tp, &cmd, type);
RTL_W32(tp, ERIAR, cmd);
- return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
RTL_R32(tp, ERIDR) : ~0;
}
@@ -1097,35 +1074,31 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
}
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
- u32 m)
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
{
- u32 val;
+ u32 val = rtl_eri_read(tp, addr);
- val = rtl_eri_read(tp, addr);
- rtl_eri_write(tp, addr, mask, (val & ~m) | p);
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
}
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
- u32 p)
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
{
- rtl_w0w1_eri(tp, addr, mask, p, 0);
+ rtl_w0w1_eri(tp, addr, p, 0);
}
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
- u32 m)
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
{
- rtl_w0w1_eri(tp, addr, mask, 0, m);
+ rtl_w0w1_eri(tp, addr, 0, m);
}
-static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
- RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
+ return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
RTL_R32(tp, OCPDR) : ~0;
}
-static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg)
{
return _rtl_eri_read(tp, reg, ERIAR_OOB);
}
@@ -1135,7 +1108,7 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
{
RTL_W32(tp, OCPDR, data);
RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+ rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1167,12 +1140,12 @@ DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
reg = rtl8168_get_ocp_reg(tp);
- return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
+ return r8168dp_ocp_read(tp, reg) & 0x00000800;
}
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
- return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x124) & 0x00000001;
}
DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1183,7 +1156,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
@@ -1191,15 +1164,14 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
- r8168ep_ocp_write(tp, 0x01, 0x30,
- r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1222,16 +1194,15 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
- r8168ep_ocp_write(tp, 0x01, 0x30,
- r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
+ r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -1255,12 +1226,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
+ return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+ return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
}
static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1279,8 +1250,8 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
- rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
- rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
+ rtl_eri_clear_bits(tp, 0xdc, BIT(0));
+ rtl_eri_set_bits(tp, 0xdc, BIT(0));
}
DECLARE_RTL_COND(rtl_efusear_cond)
@@ -1292,7 +1263,7 @@ u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
- return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
@@ -1407,11 +1378,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
if (rtl_is_8168evl_up(tp)) {
tmp--;
if (wolopts & WAKE_MAGIC)
- rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
- MagicPacket_v2);
+ rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
else
- rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
- MagicPacket_v2);
+ rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
} else if (rtl_is_8125(tp)) {
tmp--;
if (wolopts & WAKE_MAGIC)
@@ -1436,7 +1405,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_61:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
@@ -1510,19 +1479,15 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
return features;
}
-static int rtl8169_set_features(struct net_device *dev,
- netdev_features_t features)
+static void rtl_set_rx_config_features(struct rtl8169_private *tp,
+ netdev_features_t features)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 rx_config;
+ u32 rx_config = RTL_R32(tp, RxConfig);
- rtl_lock_work(tp);
-
- rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
- rx_config |= (AcceptErr | AcceptRunt);
+ rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
else
- rx_config &= ~(AcceptErr | AcceptRunt);
+ rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
if (rtl_is_8125(tp)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1532,6 +1497,16 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W32(tp, RxConfig, rx_config);
+}
+
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_lock_work(tp);
+
+ rtl_set_rx_config_features(tp, features);
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
@@ -1581,20 +1556,6 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
rtl_unlock_work(tp);
}
-static u32 rtl8169_get_msglevel(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return tp->msg_enable;
-}
-
-static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- tp->msg_enable = value;
-}
-
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
"tx_packets",
"rx_packets",
@@ -1626,7 +1587,7 @@ DECLARE_RTL_COND(rtl_counters_cond)
return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
-static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
+static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
{
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
@@ -1637,22 +1598,20 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
- return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
-static bool rtl8169_reset_counters(struct rtl8169_private *tp)
+static void rtl8169_reset_counters(struct rtl8169_private *tp)
{
/*
* Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
* tally counters.
*/
- if (tp->mac_version < RTL_GIGA_MAC_VER_19)
- return true;
-
- return rtl8169_do_counters(tp, CounterReset);
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_19)
+ rtl8169_do_counters(tp, CounterReset);
}
-static bool rtl8169_update_counters(struct rtl8169_private *tp)
+static void rtl8169_update_counters(struct rtl8169_private *tp)
{
u8 val = RTL_R8(tp, ChipCmd);
@@ -1660,16 +1619,13 @@ static bool rtl8169_update_counters(struct rtl8169_private *tp)
* Some chips are unable to dump tally counters when the receiver
* is disabled. If 0xff, the chip may be in a PCI power-save state.
*/
- if (!(val & CmdRxEnb) || val == 0xff)
- return true;
-
- return rtl8169_do_counters(tp, CounterDump);
+ if (val & CmdRxEnb && val != 0xff)
+ rtl8169_do_counters(tp, CounterDump);
}
-static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
+static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
{
struct rtl8169_counters *counters = tp->counters;
- bool ret = false;
/*
* rtl8169_init_counter_offsets is called from rtl_open. On chip
@@ -1687,22 +1643,16 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
*/
if (tp->tc_offset.inited)
- return true;
-
- /* If both, reset and update fail, propagate to caller. */
- if (rtl8169_reset_counters(tp))
- ret = true;
+ return;
- if (rtl8169_update_counters(tp))
- ret = true;
+ rtl8169_reset_counters(tp);
+ rtl8169_update_counters(tp);
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
-
- return ret;
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -1773,46 +1723,34 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
* 1 1 160us 81.92us 1.31ms
*/
-/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
-struct rtl_coalesce_scale {
- /* Rx / Tx */
- u32 nsecs[2];
-};
-
/* rx/tx scale factors for all CPlusCmd[0:1] cases */
struct rtl_coalesce_info {
u32 speed;
- struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */
+ u32 scale_nsecs[4];
};
-/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
-#define rxtx_x1822(r, t) { \
- {{(r), (t)}}, \
- {{(r)*8, (t)*8}}, \
- {{(r)*8*2, (t)*8*2}}, \
- {{(r)*8*2*2, (t)*8*2*2}}, \
-}
+/* produce array with base delay *1, *8, *8*2, *8*2*2 */
+#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
+
static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 320, 320) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(320) },
{ 0 },
};
static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
- /* speed delays: rx00 tx00 */
- { SPEED_10, rxtx_x1822(40960, 40960) },
- { SPEED_100, rxtx_x1822( 2560, 2560) },
- { SPEED_1000, rxtx_x1822( 5000, 5000) },
+ { SPEED_10, COALESCE_DELAY(40960) },
+ { SPEED_100, COALESCE_DELAY(2560) },
+ { SPEED_1000, COALESCE_DELAY(5000) },
{ 0 },
};
-#undef rxtx_x1822
+#undef COALESCE_DELAY
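A worked expansion of the macro, taking the 8169-family gigabit row as the example:

	/* COALESCE_DELAY(320) -> { 320, 2560, 5120, 10240 } ns, one entry
	 * per CPlusCmd[0:1] setting; with the 4-bit timer field at its
	 * maximum (RTL_COALESCE_T_MAX = 15) the largest scale allows
	 * 15 * 10240 ns ~= 153.6 us of coalescing delay.
	 */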
/* get rx/tx scale vector corresponding to current speed */
-static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
+static const struct rtl_coalesce_info *
+rtl_coalesce_info(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -1832,16 +1770,8 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
const struct rtl_coalesce_info *ci;
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 *max_frames;
- u32 *usecs;
- } coal_settings [] = {
- { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
- { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- int i;
- u16 w;
+ u32 scale, c_us, c_fr;
+ u16 intrmit;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
@@ -1849,111 +1779,111 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
memset(ec, 0, sizeof(*ec));
/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
+ scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
- /* read IntrMitigate and adjust according to scale */
- for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
- *p->max_frames = (w & RTL_COALESCE_MASK) << 2;
- w >>= RTL_COALESCE_SHIFT;
- *p->usecs = w & RTL_COALESCE_MASK;
- }
+ intrmit = RTL_R16(tp, IntrMitigate);
- for (i = 0; i < 2; i++) {
- p = coal_settings + i;
- *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
+ c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
+ ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
- /*
- * ethtool_coalesce says it is illegal to set both usecs and
- * max_frames to 0.
- */
- if (!*p->usecs && !*p->max_frames)
- *p->max_frames = 1;
- }
+ c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
+ /* ethtool_coalesce states usecs and max_frames must not both be 0 */
+ ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
+
+ c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
+ ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
+
+ c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
+ ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
return 0;
}
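A worked decode under the new masks, using a hypothetical register value and the 2560 ns scale from the tables above:

	/* intrmit = 0x2341:
	 *   TX_USECS  (bits 15:12) = 0x2 -> DIV_ROUND_UP(2 * 2560, 1000) =  6 us
	 *   TX_FRAMES (bits 11:8)  = 0x3 -> 3 * 4                        = 12 frames
	 *   RX_USECS  (bits 7:4)   = 0x4 -> DIV_ROUND_UP(4 * 2560, 1000) = 11 us
	 *   RX_FRAMES (bits 3:0)   = 0x1 -> 1 * 4                        =  4 frames
	 */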
-/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
-static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
- struct net_device *dev, u32 nsec, u16 *cp01)
+/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
+static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
+ u16 *cp01)
{
const struct rtl_coalesce_info *ci;
u16 i;
- ci = rtl_coalesce_info(dev);
+ ci = rtl_coalesce_info(tp);
if (IS_ERR(ci))
- return ERR_CAST(ci);
+ return PTR_ERR(ci);
for (i = 0; i < 4; i++) {
- u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
- ci->scalev[i].nsecs[1]);
- if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
+ if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
*cp01 = i;
- return &ci->scalev[i];
+ return ci->scale_nsecs[i];
}
}
- return ERR_PTR(-EINVAL);
+ return -ERANGE;
}
static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
- const struct rtl_coalesce_scale *scale;
- struct {
- u32 frames;
- u32 usecs;
- } coal_settings [] = {
- { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
- { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
- }, *p = coal_settings;
- u16 w = 0, cp01;
- int i;
+ u32 tx_fr = ec->tx_max_coalesced_frames;
+ u32 rx_fr = ec->rx_max_coalesced_frames;
+ u32 coal_usec_max, units;
+ u16 w = 0, cp01 = 0;
+ int scale;
if (rtl_is_8125(tp))
return -EOPNOTSUPP;
- scale = rtl_coalesce_choose_scale(dev,
- max(p[0].usecs, p[1].usecs) * 1000, &cp01);
- if (IS_ERR(scale))
- return PTR_ERR(scale);
+ if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
+ return -ERANGE;
- for (i = 0; i < 2; i++, p++) {
- u32 units;
+ coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
+ scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
+ if (scale < 0)
+ return scale;
- /*
- * accept max_frames=1 we returned in rtl_get_coalesce.
- * accept it not only when usecs=0 because of e.g. the following scenario:
- *
- * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
- * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
- * - then user does `ethtool -C eth0 rx-usecs 100`
- *
- * since ethtool sends to kernel whole ethtool_coalesce
- * settings, if we do not handle rx_usecs=!0, rx_frames=1
- * we'll reject it below in `frames % 4 != 0`.
- */
- if (p->frames == 1) {
- p->frames = 0;
- }
+ /* Accept max_frames=1 as returned by rtl_get_coalesce. Accept it
+ * not only when usecs=0 because of e.g. the following scenario:
+ *
+ * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
+ * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
+ * - then user does `ethtool -C eth0 rx-usecs 100`
+ *
+ * Since ethtool sends to kernel whole ethtool_coalesce settings,
+ * if we want to ignore rx_frames then it has to be set to 0.
+ */
+ if (rx_fr == 1)
+ rx_fr = 0;
+ if (tx_fr == 1)
+ tx_fr = 0;
+
+ /* HW requires time limit to be set if frame limit is set */
+ if ((tx_fr && !ec->tx_coalesce_usecs) ||
+ (rx_fr && !ec->rx_coalesce_usecs))
+ return -EINVAL;
- units = p->usecs * 1000 / scale->nsecs[i];
- if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
- return -EINVAL;
+ w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
+ w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
- w <<= RTL_COALESCE_SHIFT;
- w |= units;
- w <<= RTL_COALESCE_SHIFT;
- w |= p->frames >> 2;
- }
+ units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
+ units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
+ w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
rtl_lock_work(tp);
- RTL_W16(tp, IntrMitigate, swab16(w));
+ RTL_W16(tp, IntrMitigate, w);
+
+ /* Meaning of PktCntrDisable bit changed starting with RTL8168e-vl */
+ if (rtl_is_8168evl_up(tp)) {
+ if (!rx_fr && !tx_fr)
+ /* disable packet counter */
+ tp->cp_cmd |= PktCntrDisable;
+ else
+ tp->cp_cmd &= ~PktCntrDisable;
+ }
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
@@ -2002,12 +1932,6 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- if (dev->phydev->autoneg == AUTONEG_DISABLE ||
- dev->phydev->duplex != DUPLEX_FULL) {
- ret = -EPROTONOSUPPORT;
- goto out;
- }
-
ret = phy_ethtool_set_eee(tp->phydev, data);
if (!ret)
@@ -2026,8 +1950,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .get_msglevel = rtl8169_get_msglevel,
- .set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
.get_wol = rtl8169_get_wol,
.set_wol = rtl8169_set_wol,
@@ -2202,7 +2124,7 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
if (tp->mac_version != RTL_GIGA_MAC_VER_38)
RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
}
static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
@@ -2366,7 +2288,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
default:
@@ -2399,15 +2321,13 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
break;
default:
break;
}
phy_resume(tp->phydev);
- /* give MAC/PHY some time to resume */
- msleep(20);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
@@ -2426,8 +2346,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
- RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
- RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
@@ -2541,7 +2460,7 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
{
RTL_W8(tp, ChipCmd, CmdReset);
- rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+ rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_firmware(struct rtl8169_private *tp)
@@ -2553,10 +2472,8 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
return;
rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
- if (!rtl_fw) {
- netif_warn(tp, ifup, tp->dev, "Unable to load firmware, out of memory\n");
+ if (!rtl_fw)
return;
- }
rtl_fw->phy_write = rtl_writephy;
rtl_fw->phy_read = rtl_readphy;
@@ -2586,31 +2503,31 @@ DECLARE_RTL_COND(rtl_txcfg_empty_cond)
return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
-static void rtl8169_hw_reset(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
- /* Disable interrupts */
- rtl8169_irq_mask_and_ack(tp);
-
- rtl_rx_close(tp);
+ return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
+{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
- break;
- case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
- RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+ break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
default:
- RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
- udelay(100);
break;
}
+}
- rtl_hw_reset(tp);
+static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+ fsleep(2000);
+ rtl_wait_txrx_fifo_empty(tp);
}
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
@@ -2643,7 +2560,7 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
-static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
+static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
{
u32 val;
@@ -2669,8 +2586,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
u32 tmp;
if (dev->flags & IFF_PROMISC) {
- /* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode |= AcceptAllPhys;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
@@ -2683,7 +2598,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
}
@@ -2694,14 +2609,11 @@ static void rtl_set_rx_mode(struct net_device *dev)
}
}
- if (dev->features & NETIF_F_RXALL)
- rx_mode |= (AcceptErr | AcceptRunt);
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
tmp = RTL_R32(tp, RxConfig);
- RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
}
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -2717,7 +2629,7 @@ static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE | func << 16);
- rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+ rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
@@ -2727,7 +2639,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
CSIAR_BYTE_ENABLE);
- return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
RTL_R32(tp, CSIDR) : ~0;
}
@@ -2984,12 +2896,14 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
+ rtl_eri_set_bits(tp, 0x1d0, BIT(1));
+ rtl_reset_packet_filter(tp);
+ rtl_eri_set_bits(tp, 0x1b0, BIT(4));
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
rtl_disable_clock_request(tp);
@@ -3009,11 +2923,11 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
- rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_set_bits(tp, 0x1b0, BIT(4));
+ rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
@@ -3042,7 +2956,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
}
static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -3060,7 +2974,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168f_1);
- rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
+ rtl_eri_set_bits(tp, 0x0d4, 0x0c00);
}
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
@@ -3077,11 +2991,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_set_bits(tp, 0x0d4, 0x1f80);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
}
@@ -3307,9 +3222,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
-
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f00);
+ rtl_eri_set_bits(tp, 0xdc, 0x001c);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3325,7 +3239,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -3362,7 +3276,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f80);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3373,7 +3287,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
+ rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3465,7 +3379,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
- rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90);
+ rtl_eri_set_bits(tp, 0xd4, 0x1f90);
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
@@ -3481,7 +3395,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
+ rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -3606,7 +3520,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
+ rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00);
/* disable EEE */
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
@@ -3682,7 +3596,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe098, 0xc302);
- rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+ rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
rtl8125_config_eee_mac(tp);
@@ -3849,7 +3763,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl8169_set_magic_reg(tp, tp->mac_version);
+ rtl8169_set_magic_reg(tp);
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
@@ -3859,7 +3773,6 @@ static void rtl_hw_start(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
- tp->cp_cmd &= CPCMD_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -3881,6 +3794,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
+ rtl_set_rx_config_features(tp, tp->dev->features);
rtl_set_rx_mode(tp->dev);
rtl_irq_enable(tp);
}
@@ -3896,21 +3810,14 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
-{
- desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
- desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
-}
-
-static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
+static void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
desc->opts2 = 0;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
-
- desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
+ WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE));
}
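The WRITE_ONCE() on the ownership word pairs with the reader in the Rx poll loop, which must observe DescOwn before any other descriptor field. A simplified sketch of the consuming side (assumption: modelled on the driver's rtl_rx()):

	u32 status = le32_to_cpu(READ_ONCE(desc->opts1));

	if (status & DescOwn)
		break;		/* descriptor still owned by the NIC */
	dma_rmb();		/* read opts1 before the remaining fields */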
static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
@@ -3927,8 +3834,7 @@ static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ netdev_err(tp->dev, "Failed to map RX DMA!\n");
__free_pages(data, get_order(R8169_RX_BUF_SIZE));
return NULL;
}
@@ -3949,15 +3855,11 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
tp->Rx_databuff[i] = NULL;
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ tp->RxDescArray[i].addr = 0;
+ tp->RxDescArray[i].opts1 = 0;
}
}
-static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
-{
- desc->opts1 |= cpu_to_le32(RingEnd);
-}
-
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
unsigned int i;
@@ -3973,7 +3875,8 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
tp->Rx_databuff[i] = data;
}
- rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ /* mark as last descriptor in the ring */
+ tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
return 0;
}
@@ -4022,10 +3925,45 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
- tp->cur_tx = tp->dirty_tx = 0;
netdev_reset_queue(tp->dev);
}
+static void rtl8169_hw_reset(struct rtl8169_private *tp)
+{
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_rcu();
+
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(tp);
+
+ rtl_rx_close(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
+ break;
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
+ rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+ rtl_enable_rxdvgate(tp);
+ fsleep(2000);
+ break;
+ default:
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
+ fsleep(100);
+ break;
+ }
+
+ rtl_hw_reset(tp);
+
+ rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
+}
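
The new rtl8169_hw_reset() quiesces the chip with the rtl_loop_wait_* helpers, which repeatedly test a condition with a fixed delay and retry count. Outside this driver the same idea is usually spelled with the kernel's iopoll helpers; a hedged sketch, where the register and the bit tested are invented for illustration:

#include <linux/iopoll.h>
#include <linux/bits.h>

static int demo_wait_tx_idle(void __iomem *status_reg)
{
        u32 val;

        /* poll every 100us, give up after ~66ms; the numbers mirror the
         * 8168 quiesce above but are otherwise arbitrary
         */
        return readl_poll_timeout(status_reg, val, val & BIT(31),
                                  100, 666 * 100);
}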
+
static void rtl_reset_work(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
@@ -4033,16 +3971,12 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi);
netif_stop_queue(dev);
- synchronize_rcu();
rtl8169_hw_reset(tp);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
- rtl8169_tx_clear(tp);
- rtl8169_init_ring_indexes(tp);
-
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
@@ -4068,7 +4002,7 @@ static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
ret = dma_mapping_error(d, mapping);
if (unlikely(ret)) {
if (net_ratelimit())
- netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
+ netdev_err(tp->dev, "Failed to map TX data!\n");
return ret;
}
@@ -4139,25 +4073,20 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
u32 transport_offset = (u32)skb_transport_offset(skb);
- u32 mss = skb_shinfo(skb)->gso_size;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 mss = shinfo->gso_size;
if (mss) {
- switch (vlan_get_protocol(skb)) {
- case htons(ETH_P_IP):
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
opts[0] |= TD1_GTSENV4;
- break;
-
- case htons(ETH_P_IPV6):
+ } else if (shinfo->gso_type & SKB_GSO_TCPV6) {
if (skb_cow_head(skb, 0))
return false;
tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
- break;
-
- default:
+ } else {
WARN_ON_ONCE(1);
- break;
}
opts[0] |= transport_offset << GTTCPHO_SHIFT;
@@ -4239,7 +4168,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
- netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ if (net_ratelimit())
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -4277,8 +4207,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
- /* Force all memory writes to complete before notifying device */
- wmb();
+ /* rtl_tx needs to see the descriptor changes before the updated tp->cur_tx */

+ smp_wmb();
tp->cur_tx += frags + 1;
@@ -4323,6 +4253,37 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
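
The barrier change in the hunk above (wmb() to smp_wmb()) reflects that the ordering needed at that point is CPU-to-CPU: the xmit path must publish its descriptor writes before advancing tp->cur_tx, which the completion path reads. A generic producer/consumer sketch of that pairing; the ring layout is illustrative:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#define DEMO_RING_SIZE 256

struct demo_ring {
        unsigned int cur;               /* producer index */
        unsigned int dirty;             /* consumer index */
        u32 slots[DEMO_RING_SIZE];
};

static void demo_publish(struct demo_ring *r, u32 data)
{
        r->slots[r->cur % DEMO_RING_SIZE] = data;
        smp_wmb();                      /* slot contents before index */
        WRITE_ONCE(r->cur, r->cur + 1);
}

static bool demo_consume(struct demo_ring *r, u32 *out)
{
        if (READ_ONCE(r->cur) == r->dirty)
                return false;
        smp_rmb();                      /* pairs with demo_publish() */
        *out = r->slots[r->dirty % DEMO_RING_SIZE];
        r->dirty++;
        return true;
}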
+static unsigned int rtl_last_frag_len(struct sk_buff *skb)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int nr_frags = info->nr_frags;
+
+ if (!nr_frags)
+ return UINT_MAX;
+
+ return skb_frag_size(info->frags + nr_frags - 1);
+}
+
+/* Workaround for hw issues with TSO on RTL8168evl */
+static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ /* IPv4 header has options field */
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
+ ip_hdrlen(skb) > sizeof(struct iphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ /* IPv4 TCP header has options field */
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
+ tcp_hdrlen(skb) > sizeof(struct tcphdr))
+ features &= ~NETIF_F_ALL_TSO;
+
+ else if (rtl_last_frag_len(skb) <= 6)
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
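
rtl8168evl_fix_tso() is called from the driver's ndo_features_check hook, which lets a driver veto offloads per packet rather than globally. A minimal sketch of such a hook; the 128-byte threshold is an arbitrary stand-in for a device quirk:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_features_t demo_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
{
        /* per-packet demotion: drop TSO when this (invented) quirk hits */
        if (skb_is_gso(skb) && skb_transport_offset(skb) > 128)
                features &= ~NETIF_F_ALL_TSO;

        return features;
}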
+
static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
@@ -4331,6 +4292,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34)
+ features = rtl8168evl_fix_tso(skb, features);
+
if (transport_offset > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
@@ -4367,9 +4331,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_status_errs = pci_status_get_and_clear_errors(pdev);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
- pci_cmd, pci_status_errs);
-
+ if (net_ratelimit())
+ netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
* The recovery sequence below admits a very elaborated explanation:
* - it seems to work;
@@ -4465,15 +4429,17 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
- unsigned int cur_rx, rx_left;
- unsigned int count;
+ unsigned int cur_rx, rx_left, count;
+ struct device *d = tp_to_dev(tp);
cur_rx = tp->cur_rx;
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
- unsigned int entry = cur_rx % NUM_RX_DESC;
- const void *rx_buf = page_address(tp->Rx_databuff[entry]);
+ unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
+ struct sk_buff *skb;
+ const void *rx_buf;
+ dma_addr_t addr;
u32 status;
status = le32_to_cpu(desc->opts1);
@@ -4487,69 +4453,65 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dma_rmb();
if (unlikely(status & RxRES)) {
- netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
- status);
+ if (net_ratelimit())
+ netdev_warn(dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
if (status & RxCRC)
dev->stats.rx_crc_errors++;
- if (status & (RxRUNT | RxCRC) && !(status & RxRWT) &&
- dev->features & NETIF_F_RXALL) {
- goto process_pkt;
- }
- } else {
- unsigned int pkt_size;
- struct sk_buff *skb;
-
-process_pkt:
- pkt_size = status & GENMASK(13, 0);
- if (likely(!(dev->features & NETIF_F_RXFCS)))
- pkt_size -= ETH_FCS_LEN;
- /*
- * The driver does not support incoming fragmented
- * frames. They are seen as a symptom of over-mtu
- * sized frames.
- */
- if (unlikely(rtl8169_fragmented_frame(status))) {
- dev->stats.rx_dropped++;
- dev->stats.rx_length_errors++;
- goto release_descriptor;
- }
- skb = napi_alloc_skb(&tp->napi, pkt_size);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!(dev->features & NETIF_F_RXALL))
goto release_descriptor;
- }
+ else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
+ goto release_descriptor;
+ }
+
+ pkt_size = status & GENMASK(13, 0);
+ if (likely(!(dev->features & NETIF_F_RXFCS)))
+ pkt_size -= ETH_FCS_LEN;
- dma_sync_single_for_cpu(tp_to_dev(tp),
- le64_to_cpu(desc->addr),
- pkt_size, DMA_FROM_DEVICE);
- prefetch(rx_buf);
- skb_copy_to_linear_data(skb, rx_buf, pkt_size);
- skb->tail += pkt_size;
- skb->len = pkt_size;
+ /* The driver does not support incoming fragmented frames.
+ * They are seen as a symptom of over-mtu sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ goto release_descriptor;
+ }
- dma_sync_single_for_device(tp_to_dev(tp),
- le64_to_cpu(desc->addr),
- pkt_size, DMA_FROM_DEVICE);
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ goto release_descriptor;
+ }
- rtl8169_rx_csum(skb, status);
- skb->protocol = eth_type_trans(skb, dev);
+ addr = le64_to_cpu(desc->addr);
+ rx_buf = page_address(tp->Rx_databuff[entry]);
- rtl8169_rx_vlan_tag(desc, skb);
+ dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
+ prefetch(rx_buf);
+ skb_copy_to_linear_data(skb, rx_buf, pkt_size);
+ skb->tail += pkt_size;
+ skb->len = pkt_size;
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
- if (skb->pkt_type == PACKET_MULTICAST)
- dev->stats.multicast++;
+ rtl8169_rx_csum(skb, status);
+ skb->protocol = eth_type_trans(skb, dev);
- napi_gro_receive(&tp->napi, skb);
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
+
+ napi_gro_receive(&tp->napi, skb);
+
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
- u64_stats_update_begin(&tp->rx_stats.syncp);
- tp->rx_stats.packets++;
- tp->rx_stats.bytes += pkt_size;
- u64_stats_update_end(&tp->rx_stats.syncp);
- }
release_descriptor:
rtl8169_mark_to_asic(desc);
}
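
The rx counters above are bracketed by u64_stats_update_begin()/end() because 64-bit counters cannot be read atomically on 32-bit machines; the seqcount inside u64_stats_sync lets readers retry if they race an update. A sketch of both sides (the writer is assumed single-threaded per ring, e.g. NAPI context, and the syncp is initialized with u64_stats_init()):

#include <linux/u64_stats_sync.h>

struct demo_rx_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void demo_count_rx(struct demo_rx_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

static void demo_read_rx(struct demo_rx_stats *s, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *pkts = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}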
@@ -4665,25 +4627,21 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
return 0;
}
-static void rtl8169_down(struct net_device *dev)
+static void rtl8169_down(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_lock_work(tp);
- phy_stop(tp->phydev);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+ phy_stop(tp->phydev);
napi_disable(&tp->napi);
- netif_stop_queue(dev);
rtl8169_hw_reset(tp);
- /* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_rcu();
-
- rtl8169_tx_clear(tp);
-
- rtl8169_rx_clear(tp);
-
rtl_pll_power_down(tp);
+
+ rtl_unlock_work(tp);
}
static int rtl8169_close(struct net_device *dev)
@@ -4696,12 +4654,9 @@ static int rtl8169_close(struct net_device *dev)
/* Update counters before going down */
rtl8169_update_counters(tp);
- rtl_lock_work(tp);
- /* Clear all task flags */
- bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
-
- rtl8169_down(dev);
- rtl_unlock_work(tp);
+ netif_stop_queue(dev);
+ rtl8169_down(tp);
+ rtl8169_rx_clear(tp);
cancel_work_sync(&tp->wk.work);
@@ -4779,8 +4734,7 @@ static int rtl_open(struct net_device *dev)
rtl_hw_start(tp);
- if (!rtl8169_init_counter_offsets(tp))
- netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ rtl8169_init_counter_offsets(tp);
phy_start(tp->phydev);
netif_start_queue(dev);
@@ -4856,44 +4810,30 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_put_noidle(&pdev->dev);
}
-static void rtl8169_net_suspend(struct net_device *dev)
+static void rtl8169_net_suspend(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (!netif_running(dev))
+ if (!netif_running(tp->dev))
return;
- phy_stop(tp->phydev);
- netif_device_detach(dev);
-
- rtl_lock_work(tp);
- napi_disable(&tp->napi);
- /* Clear all task flags */
- bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
-
- rtl_unlock_work(tp);
-
- rtl_pll_power_down(tp);
+ netif_device_detach(tp->dev);
+ rtl8169_down(tp);
}
#ifdef CONFIG_PM
-static int rtl8169_suspend(struct device *device)
+static int __maybe_unused rtl8169_suspend(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
clk_disable_unprepare(tp->clk);
return 0;
}
-static void __rtl8169_resume(struct net_device *dev)
+static void __rtl8169_resume(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- netif_device_attach(dev);
+ netif_device_attach(tp->dev);
rtl_pll_power_up(tp);
rtl8169_init_phy(tp);
@@ -4907,34 +4847,32 @@ static void __rtl8169_resume(struct net_device *dev)
rtl_unlock_work(tp);
}
-static int rtl8169_resume(struct device *device)
+static int __maybe_unused rtl8169_resume(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl_rar_set(tp, dev->dev_addr);
+ rtl_rar_set(tp, tp->dev->dev_addr);
clk_prepare_enable(tp->clk);
- if (netif_running(dev))
- __rtl8169_resume(dev);
+ if (netif_running(tp->dev))
+ __rtl8169_resume(tp);
return 0;
}
static int rtl8169_runtime_suspend(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
if (!tp->TxDescArray)
return 0;
rtl_lock_work(tp);
- __rtl8169_set_wol(tp, WAKE_ANY);
+ __rtl8169_set_wol(tp, WAKE_PHY);
rtl_unlock_work(tp);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
/* Update counters before going runtime suspend */
rtl8169_update_counters(tp);
@@ -4944,10 +4882,9 @@ static int rtl8169_runtime_suspend(struct device *device)
static int rtl8169_runtime_resume(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- rtl_rar_set(tp, dev->dev_addr);
+ rtl_rar_set(tp, tp->dev->dev_addr);
if (!tp->TxDescArray)
return 0;
@@ -4956,40 +4893,28 @@ static int rtl8169_runtime_resume(struct device *device)
__rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- __rtl8169_resume(dev);
+ __rtl8169_resume(tp);
return 0;
}
static int rtl8169_runtime_idle(struct device *device)
{
- struct net_device *dev = dev_get_drvdata(device);
+ struct rtl8169_private *tp = dev_get_drvdata(device);
- if (!netif_running(dev) || !netif_carrier_ok(dev))
+ if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
pm_schedule_suspend(device, 10000);
return -EBUSY;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- .suspend = rtl8169_suspend,
- .resume = rtl8169_resume,
- .freeze = rtl8169_suspend,
- .thaw = rtl8169_resume,
- .poweroff = rtl8169_suspend,
- .restore = rtl8169_resume,
- .runtime_suspend = rtl8169_runtime_suspend,
- .runtime_resume = rtl8169_runtime_resume,
- .runtime_idle = rtl8169_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
+ SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
+ rtl8169_runtime_idle)
};
-#define RTL8169_PM_OPS (&rtl8169_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define RTL8169_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
+#endif /* CONFIG_PM */
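
The PM rework above relies on two conveniences: SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() expand to the individual dev_pm_ops members (and to nothing when the matching CONFIG_PM options are off), and __maybe_unused keeps the compiler quiet about callbacks that are then referenced nowhere. A skeletal sketch with placeholder callbacks:

#include <linux/pm.h>
#include <linux/errno.h>

static int __maybe_unused demo_suspend(struct device *dev) { return 0; }
static int __maybe_unused demo_resume(struct device *dev) { return 0; }
static int __maybe_unused demo_rt_suspend(struct device *dev) { return 0; }
static int __maybe_unused demo_rt_resume(struct device *dev) { return 0; }
static int __maybe_unused demo_rt_idle(struct device *dev) { return -EBUSY; }

static const struct dev_pm_ops demo_pm_ops = {
        /* expands to .suspend/.resume/.freeze/.thaw/.poweroff/.restore */
        SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
        SET_RUNTIME_PM_OPS(demo_rt_suspend, demo_rt_resume, demo_rt_idle)
};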
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
@@ -5010,13 +4935,12 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
static void rtl_shutdown(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = pci_get_drvdata(pdev);
- rtl8169_net_suspend(dev);
+ rtl8169_net_suspend(tp);
/* Restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ rtl_rar_set(tp, tp->dev->perm_addr);
rtl8169_hw_reset(tp);
@@ -5033,24 +4957,20 @@ static void rtl_shutdown(struct pci_dev *pdev)
static void rtl_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = pci_get_drvdata(pdev);
- if (r8168_check_dash(tp))
- rtl8168_driver_stop(tp);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
- netif_napi_del(&tp->napi);
+ unregister_netdev(tp->dev);
- unregister_netdev(dev);
- mdiobus_unregister(tp->phydev->mdio.bus);
+ if (r8168_check_dash(tp))
+ rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
- if (pci_dev_run_wake(pdev))
- pm_runtime_get_noresume(&pdev->dev);
-
/* restore original MAC address */
- rtl_rar_set(tp, dev->perm_addr);
+ rtl_rar_set(tp, tp->dev->perm_addr);
}
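
Several hunks above change the PCI drvdata from the net_device to the private struct, so PM and shutdown callbacks can skip the netdev_priv() detour. A sketch of that convention with hypothetical types:

#include <linux/pci.h>
#include <linux/netdevice.h>

struct demo_priv {
        struct net_device *dev;
        /* ... */
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct demo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* store the private struct, not the netdev */
        pci_set_drvdata(pdev, priv);
        return 0;
}

static void demo_shutdown(struct pci_dev *pdev)
{
        struct demo_priv *priv = pci_get_drvdata(pdev);

        /* use priv directly; no netdev_priv() round-trip */
        (void)priv;
}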
static const struct net_device_ops rtl_netdev_ops = {
@@ -5132,9 +5052,9 @@ DECLARE_RTL_COND(rtl_link_list_ready_cond)
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
-DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
{
- return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+ rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
@@ -5179,20 +5099,19 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
- ret = mdiobus_register(new_bus);
+ ret = devm_mdiobus_register(new_bus);
if (ret)
return ret;
tp->phydev = mdiobus_get_phy(new_bus, 0);
if (!tp->phydev) {
- mdiobus_unregister(new_bus);
return -ENODEV;
} else if (!tp->phydev->drv) {
/* Most chip versions fail with the genphy driver.
* Therefore ensure that the dedicated PHY driver is loaded.
*/
- dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
- mdiobus_unregister(new_bus);
+ dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
+ tp->phydev->phy_id);
return -EUNATCH;
}
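
Switching to devm_mdiobus_register() ties bus unregistration to the device's lifetime, which is why the error paths above lose their manual mdiobus_unregister() calls. A sketch of the managed pattern, using the single-argument form this tree has (later kernels take the device as a first argument); read/write ops are elided:

#include <linux/phy.h>

static int demo_mdio_register(struct device *parent, struct mii_bus **out)
{
        struct mii_bus *bus;
        int ret;

        bus = devm_mdiobus_alloc(parent);
        if (!bus)
                return -ENOMEM;

        bus->name = "demo-mdio";
        snprintf(bus->id, MII_BUS_ID_SIZE, "demo-%s", dev_name(parent));
        bus->parent = parent;
        /* bus->read/bus->write would be wired up here */

        ret = devm_mdiobus_register(bus);
        if (ret)
                return ret;     /* no mdiobus_unregister() on any path */

        *out = bus;
        return 0;
}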
@@ -5204,53 +5123,34 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
- return;
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_init_8125(struct rtl8169_private *tp)
{
- tp->ocp_base = OCP_STD_PHY_BASE;
-
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
- return;
+ rtl_enable_rxdvgate(tp);
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
-
- if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
- return;
+ r8168g_wait_ll_share_fifo_ready(tp);
r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
-
- rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+ r8168g_wait_ll_share_fifo_ready(tp);
}
static void rtl_hw_initialize(struct rtl8169_private *tp)
@@ -5365,9 +5265,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->eee_adv = -1;
+ tp->ocp_base = OCP_STD_PHY_BASE;
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
@@ -5423,7 +5323,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
- tp->cp_cmd = RTL_R16(tp, CPlusCmd);
+ tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
@@ -5458,14 +5358,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HIGHDMA;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tp->cp_cmd |= RxChkSum;
- /* RTL8125 uses register RxConfig for VLAN offloading config */
- if (!rtl_is_8125(tp))
- tp->cp_cmd |= RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -5497,6 +5392,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ /* configure chip for default features */
+ rtl8169_set_features(dev, dev->features);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5511,7 +5409,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!tp->counters)
return -ENOMEM;
- pci_set_drvdata(pdev, dev);
+ pci_set_drvdata(pdev, tp);
rc = r8169_mdio_register(tp);
if (rc)
@@ -5522,17 +5420,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc)
- goto err_mdio_unregister;
+ return rc;
- netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid,
- pci_irq_vector(pdev, 0));
+ netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
+ pci_irq_vector(pdev, 0));
if (jumbo_max)
- netif_info(tp, probe, dev,
- "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
- jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
- "ok" : "ko");
+ netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
+ jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
+ "ok" : "ko");
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
@@ -5541,10 +5438,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pm_runtime_put_sync(&pdev->dev);
return 0;
-
-err_mdio_unregister:
- mdiobus_unregister(tp->phydev->mdio.bus);
- return rc;
}
static struct pci_driver rtl8169_pci_driver = {
@@ -5553,7 +5446,9 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
- .driver.pm = RTL8169_PM_OPS,
+#ifdef CONFIG_PM
+ .driver.pm = &rtl8169_pm_ops,
+#endif
};
module_pci_driver(rtl8169_pci_driver);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 067ad25553b9..a442bcf64b9c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1014,6 +1014,7 @@ static int ravb_phy_init(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
struct phy_device *phydev;
struct device_node *pn;
+ phy_interface_t iface;
int err;
priv->link = 0;
@@ -1032,8 +1033,13 @@ static int ravb_phy_init(struct net_device *ndev)
}
pn = of_node_get(np);
}
- phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
- priv->phy_interface);
+
+ iface = priv->phy_interface;
+ if (priv->chip_id != RCAR_GEN2 && phy_interface_mode_is_rgmii(iface)) {
+ /* ravb_set_delay_mode() takes care of internal delay mode */
+ iface = PHY_INTERFACE_MODE_RGMII;
+ }
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
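
The ravb change flattens the RGMII internal-delay variants to plain PHY_INTERFACE_MODE_RGMII when the MAC applies the delays itself, so the PHY does not add them a second time. A sketch of that downgrade as a helper; mac_delays is a hypothetical flag:

#include <linux/phy.h>

static phy_interface_t demo_pick_iface(phy_interface_t iface, bool mac_delays)
{
        /* rgmii, rgmii-id, rgmii-rxid and rgmii-txid all match here */
        if (mac_delays && phy_interface_mode_is_rgmii(iface))
                return PHY_INTERFACE_MODE_RGMII;
        return iface;
}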
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 8ed73f44405d..f45331ed90b0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2472,7 +2472,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
}
/* Packet transmit function */
-static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
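
The sh_eth fix is purely a prototype correction: .ndo_start_xmit must return netdev_tx_t, not int, so the function now matches the ops-table type. A minimal skeleton with the corrected signature:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
                                   struct net_device *ndev)
{
        bool ring_full = false; /* illustrative; a real driver checks hw */

        if (ring_full)
                return NETDEV_TX_BUSY;

        /* ... map and queue the skb here ... */
        dev_kfree_skb_any(skb); /* sketch only: drop instead of sending */
        return NETDEV_TX_OK;
}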
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 128ee7cda1ed..65c98837ec45 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -610,12 +610,9 @@ static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
ether3_readbuffer(dev, addrs+2, 12);
if (next_ptr < RX_START || next_ptr >= RX_END) {
- int i;
printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
- for (i = 2; i < 14; i++)
- printk("%02X ", addrs[i]);
- printk("\n");
+ printk("%pM %pM\n", addrs + 2, addrs + 8);
next_ptr = priv(dev)->rx_head;
break;
}
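
The ether3 cleanup replaces a manual hex loop with the %pM printk extension, which formats a 6-byte buffer as a colon-separated MAC address. A sketch; the offsets follow the frame layout read above:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_dump_addrs(const u8 *addrs)
{
        /* destination at addrs + 2, source at addrs + 8 */
        printk(KERN_DEBUG "dst %pM src %pM\n", addrs + 2, addrs + 8);
}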
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 3f16bd807c6e..4b0e3695a71a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -553,7 +553,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
@@ -1281,13 +1281,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_check_datapath_caps = false;
}
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
nic_data->n_allocated_vis);
if (rc)
return rc;
- nic_data->must_realloc_vis = false;
+ efx->must_realloc_vis = false;
}
if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
@@ -1326,16 +1326,15 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
#endif
/* All our allocations have been reset */
- nic_data->must_realloc_vis = true;
- nic_data->must_restore_rss_contexts = true;
- nic_data->must_restore_filters = true;
+ efx->must_realloc_vis = true;
+ efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
if (nic_data->vf)
for (i = 0; i < efx->vf_count; i++)
@@ -1820,6 +1819,7 @@ static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
}
static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+ __must_hold(&efx->stats_lock)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -2389,6 +2389,86 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
+static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int enabled, implemented;
+ bool want_workaround_26807;
+ int rc;
+
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before this workaround,
+ * thus it must be unavailable in this firmware.
+ */
+ nic_data->workaround_26807 = false;
+ return 0;
+ }
+ if (rc)
+ return rc;
+ want_workaround_26807 =
+ implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (want_workaround_26807 && !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+
+ /* With MCFW v4.6.x and earlier, the
+ * boot count will have incremented,
+ * so re-read the warm_boot_count
+ * value now to ensure this function
+ * doesn't think it has changed next
+ * time it checks.
+ */
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0) {
+ nic_data->warm_boot_count = rc;
+ rc = 0;
+ }
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ return rc;
+}
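
efx_ef10_probe_multicast_chaining() treats -ENOSYS from the firmware query as "feature absent" rather than as a failure, because GET_WORKAROUNDS predates the workaround being probed. A generic sketch of that fallback idiom; the types and the helper are hypothetical:

#include <linux/types.h>
#include <linux/errno.h>

struct demo_nic {
        bool has_feature;
};

/* hypothetical firmware query; -ENOSYS means the firmware is too old
 * to know the command at all
 */
static int demo_query_feature(struct demo_nic *nic)
{
        return -ENOSYS;
}

static int demo_probe_feature(struct demo_nic *nic)
{
        int rc = demo_query_feature(nic);

        if (rc == -ENOSYS) {
                /* command unknown: the feature certainly isn't there */
                nic->has_feature = false;
                return 0;
        }
        if (rc)
                return rc;
        nic->has_feature = true;
        return 0;
}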
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc = efx_ef10_probe_multicast_chaining(efx);
+ struct efx_mcdi_filter_vlan *vlan;
+
+ if (rc)
+ return rc;
+ rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
+ if (rc)
+ return rc;
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+ return 0;
+
+fail_add_vlan:
+ efx_mcdi_filter_table_remove(efx);
+ return rc;
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -2464,75 +2544,14 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_ef10_nic_data *nic_data;
- unsigned int enabled, implemented;
bool use_v2, cut_thru;
- int rc;
nic_data = efx->nic_data;
use_v2 = nic_data->datapath_caps2 &
1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
cut_thru = !(nic_data->datapath_caps &
1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
- rc = efx_mcdi_ev_init(channel, cut_thru, use_v2);
-
- /* IRQ return is ignored */
- if (channel->channel || rc)
- return rc;
-
- /* Successfully created event queue on channel 0 */
- rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
- if (rc == -ENOSYS) {
- /* GET_WORKAROUNDS was implemented before this workaround,
- * thus it must be unavailable in this firmware.
- */
- nic_data->workaround_26807 = false;
- rc = 0;
- } else if (rc) {
- goto fail;
- } else {
- nic_data->workaround_26807 =
- !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
-
- if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
- !nic_data->workaround_26807) {
- unsigned int flags;
-
- rc = efx_mcdi_set_workaround(efx,
- MC_CMD_WORKAROUND_BUG26807,
- true, &flags);
-
- if (!rc) {
- if (flags &
- 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
- netif_info(efx, drv, efx->net_dev,
- "other functions on NIC have been reset\n");
-
- /* With MCFW v4.6.x and earlier, the
- * boot count will have incremented,
- * so re-read the warm_boot_count
- * value now to ensure this function
- * doesn't think it has changed next
- * time it checks.
- */
- rc = efx_ef10_get_warm_boot_count(efx);
- if (rc >= 0) {
- nic_data->warm_boot_count = rc;
- rc = 0;
- }
- }
- nic_data->workaround_26807 = true;
- } else if (rc == -EPERM) {
- rc = 0;
- }
- }
- }
-
- if (!rc)
- return 0;
-
-fail:
- efx_mcdi_ev_fini(channel);
- return rc;
+ return efx_mcdi_ev_init(channel, cut_thru, use_v2);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
@@ -3100,16 +3119,15 @@ void efx_ef10_handle_drain_event(struct efx_nic *efx)
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
int pending;
/* If the MC has just rebooted, the TX/RX queues will have already been
* torn down, but efx->active_queues needs to be set to zero.
*/
- if (nic_data->must_realloc_vis) {
+ if (efx->must_realloc_vis) {
atomic_set(&efx->active_queues, 0);
return 0;
}
@@ -3158,22 +3176,22 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
- rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
goto restore_filters;
ether_addr_copy(mac_old, nic_data->vport_mac);
- rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
nic_data->vport_mac);
if (rc)
goto restore_vadaptor;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
if (!rc) {
ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
} else {
- rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
+ rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
if (rc2) {
/* Failed to add original MAC, so clear vport_mac */
eth_zero_addr(nic_data->vport_mac);
@@ -3182,12 +3200,12 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
}
restore_vadaptor:
- rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc2)
goto reset_nic;
restore_filters:
down_write(&efx->filter_sem);
- rc2 = efx_mcdi_filter_table_probe(efx);
+ rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3211,7 +3229,6 @@ reset_nic:
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
bool was_enabled = efx->port_enabled;
int rc;
@@ -3225,11 +3242,11 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
sizeof(inbuf), NULL, 0, NULL);
- efx_mcdi_filter_table_probe(efx);
+ efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -3239,6 +3256,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
#ifdef CONFIG_SFC_SRIOV
if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
if (rc == -EPERM) {
@@ -3961,6 +3979,35 @@ out_unlock:
return rc;
}
+/* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
+ size_t len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return scnprintf(buf, len, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
+}
+
+static unsigned int ef10_check_caps(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset)
+{
+ const struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ switch (offset) {
+ case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST:
+ return nic_data->datapath_caps & BIT_ULL(flag);
+ case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST:
+ return nic_data->datapath_caps2 & BIT_ULL(flag);
+ default:
+ return 0;
+ }
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4027,7 +4074,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4073,6 +4120,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4139,7 +4188,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_process = efx_ef10_ev_process,
.ev_read_ack = efx_ef10_ev_read_ack,
.ev_test_generate = efx_ef10_ev_test_generate,
- .filter_table_probe = efx_mcdi_filter_table_probe,
+ .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
@@ -4208,4 +4257,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
.rx_hash_key_size = 40,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
};
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 4580b30caae1..21fa6c0e8873 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,15 +232,14 @@ fail:
static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 port_flags;
int rc;
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
if (rc)
goto fail_vadaptor_alloc;
- rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ rc = efx_ef10_vadaptor_query(efx, efx->vport_id,
&port_flags, NULL, NULL);
if (rc)
goto fail_vadaptor_query;
@@ -281,11 +280,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
- EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ EFX_EF10_NO_VLAN, &efx->vport_id);
if (rc)
goto fail2;
- rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ rc = efx_ef10_vport_add_mac(efx, efx->vport_id, net_dev->dev_addr);
if (rc)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
@@ -296,11 +295,11 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
return 0;
fail4:
- efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ efx_ef10_vport_del_mac(efx, efx->vport_id, nic_data->vport_mac);
eth_zero_addr(nic_data->vport_mac);
fail3:
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
fail2:
efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
fail1:
@@ -355,22 +354,22 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
efx_ef10_sriov_free_vf_vswitching(efx);
- efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_free(efx, efx->vport_id);
- if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ if (efx->vport_id == EVB_PORT_ID_ASSIGNED)
return; /* No vswitch was ever created */
if (!is_zero_ether_addr(nic_data->vport_mac)) {
- efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx_ef10_vport_del_mac(efx, efx->vport_id,
efx->net_dev->dev_addr);
eth_zero_addr(nic_data->vport_mac);
}
- efx_ef10_vport_free(efx, nic_data->vport_id);
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+ efx_ef10_vport_free(efx, efx->vport_id);
+ efx->vport_id = EVB_PORT_ID_ASSIGNED;
/* Only free the vswitch if no VFs are assigned */
if (!pci_vfs_assigned(efx->pci_dev))
- efx_ef10_vswitch_free(efx, nic_data->vport_id);
+ efx_ef10_vswitch_free(efx, efx->vport_id);
}
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 15c731d04065..a8cc3881edce 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1425,23 +1425,16 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
le16_to_cpu(ver_words[2]),
le16_to_cpu(ver_words[3]));
- /* EF10 may have multiple datapath firmware variants within a
- * single version. Report which variants are running.
- */
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
- nic_data->rx_dpcpu_fw_id,
- nic_data->tx_dpcpu_fw_id);
+ if (efx->type->print_additional_fwver)
+ offset += efx->type->print_additional_fwver(efx, buf + offset,
+ len - offset);
- /* It's theoretically possible for the string to exceed 31
- * characters, though in practice the first three version
- * components are short enough that this doesn't happen.
- */
- if (WARN_ON(offset >= len))
- buf[0] = 0;
- }
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
return;
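
The version-string code relies on scnprintf() returning the number of characters actually stored (never more than fits), so repeated offset += scnprintf(buf + offset, len - offset, ...) appends can never run past the buffer. A compact sketch with made-up version fields:

#include <linux/kernel.h>

static void demo_build_fwver(char *buf, size_t len)
{
        size_t offset = 0;

        offset += scnprintf(buf + offset, len - offset,
                            "%u.%u.%u.%u", 6, 2, 1, 0);
        /* appending more can never overrun: scnprintf stays within len */
        offset += scnprintf(buf + offset, len - offset, " rx%x tx%x", 1, 1);
}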
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 54a45010b576..b107e4c00285 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -326,6 +326,18 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+#define MCDI_CAPABILITY(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
+
+/* field is FLAGS1 or FLAGS2 */
+#define efx_has_cap(efx, flag, field) \
+ efx->type->check_caps(efx, \
+ MCDI_CAPABILITY(flag), \
+ MCDI_CAPABILITY_OFST(field))
+
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
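
efx_has_cap() builds the full MCDI constant names by token pasting, so call sites can write efx_has_cap(efx, VXLAN_NVGRE, FLAGS1) instead of spelling out both long identifiers. A generic, self-contained sketch of the same pattern; all names and values are invented:

#define DEMO_OUT_TUNNEL_LBN     9
#define DEMO_OUT_FLAGS1_OFST    20

#define DEMO_CAP(flag)          DEMO_OUT_ ## flag ## _LBN
#define DEMO_CAP_OFST(word)     DEMO_OUT_ ## word ## _OFST

#define demo_has_cap(dev, flag, word) \
        ((dev)->check_caps((dev), DEMO_CAP(flag), DEMO_CAP_OFST(word)))

/* demo_has_cap(dev, TUNNEL, FLAGS1) expands to
 *   dev->check_caps(dev, 9, 20)
 */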
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4310ae5bd898..455a62814fb9 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -186,7 +186,6 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
struct efx_rss_context *ctx,
bool replacing)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -211,7 +210,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -332,7 +331,6 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
@@ -461,7 +459,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
- if (rc == -EINVAL && nic_data->must_realloc_vis)
+ if (rc == -EINVAL && efx->must_realloc_vis)
/* The MC rebooted under us, causing it to reject our filter
* insertion as pointing to an invalid VI (spec->dmaq_id).
*/
@@ -813,7 +811,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
@@ -830,8 +828,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
efx_filter_set_uc_def(&spec);
if (encap_type) {
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
efx_filter_set_encap_type(&spec, encap_type);
else
/*
@@ -899,7 +896,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
*id = efx_mcdi_filter_get_unsafe_id(rc);
- if (!nic_data->workaround_26807 && !encap_type) {
+ if (!table->mc_chaining && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -965,7 +962,6 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
struct efx_mcdi_filter_vlan *vlan)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
/*
* Do not install unspecified VID if VLAN filtering is enabled.
@@ -1012,11 +1008,10 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 &&
- table->mc_promisc_last != table->mc_promisc)
+ if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
efx_mcdi_filter_remove_old(efx);
if (table->mc_promisc) {
- if (nic_data->workaround_26807) {
+ if (table->mc_chaining) {
/*
* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
@@ -1051,7 +1046,7 @@ static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
*/
if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
- if (nic_data->workaround_26807)
+ if (table->mc_chaining)
efx_mcdi_filter_remove_old(efx);
if (efx_mcdi_filter_insert_def(efx, vlan,
EFX_ENCAP_TYPE_NONE,
@@ -1288,12 +1283,10 @@ efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
return 0;
}
-int efx_mcdi_filter_table_probe(struct efx_nic *efx)
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
struct efx_mcdi_filter_table *table;
- struct efx_mcdi_filter_vlan *vlan;
int rc;
if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
@@ -1306,12 +1299,12 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
if (!table)
return -ENOMEM;
+ table->mc_chaining = multicast_chaining;
table->rx_match_count = 0;
rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
if (rc)
goto fail;
- if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
if (rc)
goto fail;
@@ -1342,22 +1335,22 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx)
efx->filter_state = table;
- list_for_each_entry(vlan, &nic_data->vlan_list, list) {
- rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
- if (rc)
- goto fail_add_vlan;
- }
-
return 0;
-
-fail_add_vlan:
- efx_mcdi_filter_cleanup_vlans(efx);
- efx->filter_state = NULL;
fail:
kfree(table);
return rc;
}
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
+
+ if (table) {
+ table->must_restore_filters = true;
+ table->must_restore_rss_contexts = true;
+ }
+}
+
/*
* Caller must hold efx->filter_sem for read if race against
* efx_mcdi_filter_table_remove() is possible
@@ -1365,7 +1358,6 @@ fail:
void efx_mcdi_filter_table_restore(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
struct efx_filter_spec *spec;
@@ -1377,10 +1369,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
- if (!nic_data->must_restore_filters)
- return;
-
- if (!table)
+ if (!table || !table->must_restore_filters)
return;
down_write(&table->lock);
@@ -1456,7 +1445,7 @@ not_restored:
netif_err(efx, hw, efx->net_dev,
"unable to restore %u filters\n", failed);
else
- nic_data->must_restore_filters = false;
+ table->must_restore_filters = false;
}
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
@@ -1921,7 +1910,6 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
u32 alloc_type = exclusive ?
@@ -1939,12 +1927,11 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
return 0;
}
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+ if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
return -EOPNOTSUPP;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- nic_data->vport_id);
+ efx->vport_id);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
@@ -1961,8 +1948,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
if (context_size)
*context_size = rss_spread;
- if (nic_data->datapath_caps &
- 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
+ if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
efx_mcdi_set_rss_context_flags(efx, ctx);
return 0;
@@ -2030,14 +2016,14 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
context_size);
if (rc != 0)
return rc;
- nic_data->rx_rss_context_exclusive = false;
+ table->rx_rss_context_exclusive = false;
efx_set_default_rx_indir_table(efx, &efx->rss_context);
return 0;
}
@@ -2046,12 +2032,12 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ struct efx_mcdi_filter_table *table = efx->filter_state;
u32 old_rx_rss_context = efx->rss_context.context_id;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
- !nic_data->rx_rss_context_exclusive) {
+ !table->rx_rss_context_exclusive) {
rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
NULL);
if (rc == -EOPNOTSUPP)
@@ -2068,7 +2054,7 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (efx->rss_context.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
- nic_data->rx_rss_context_exclusive = true;
+ table->rx_rss_context_exclusive = true;
if (rx_indir_table != efx->rss_context.rx_indir_table)
memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
sizeof(efx->rss_context.rx_indir_table));
@@ -2182,13 +2168,13 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_mcdi_filter_table *table = efx->filter_state;
struct efx_rss_context *ctx;
int rc;
WARN_ON(!mutex_is_locked(&efx->rss_lock));
- if (!nic_data->must_restore_rss_contexts)
+ if (!table->must_restore_rss_contexts)
return;
list_for_each_entry(ctx, &efx->rss_context.list, list) {
@@ -2204,7 +2190,7 @@ void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
"; RSS filters may fail to be applied\n",
ctx->user_id, rc);
}
- nic_data->must_restore_rss_contexts = false;
+ table->must_restore_rss_contexts = false;
}
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 1837f4f5d661..03a8bf74c733 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -55,6 +55,8 @@ struct efx_mcdi_filter_table {
u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
+ /* Our RSS context is exclusive (as opposed to shared) */
+ bool rx_rss_context_exclusive;
struct rw_semaphore lock; /* Protects entries */
struct {
@@ -75,14 +77,27 @@ struct efx_mcdi_filter_table {
/* Whether in multicast promiscuous mode when last changed */
bool mc_promisc_last;
bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
+ /* RSS contexts have yet to be restored after MC reboot */
+ bool must_restore_rss_contexts;
+ /* filters have yet to be restored after MC reboot */
+ bool must_restore_filters;
+ /* Multicast filter chaining allows less-specific filters to receive
+ * multicast packets that matched more-specific filters. Early EF10
+ * firmware didn't support this (SF bug 26807); if mc_chaining == false
+ * then we still subscribe to the dev_mc_list even when mc_promisc is
+ * set, to prevent another VI from stealing the traffic.
+ */
+ bool mc_chaining;
bool vlan_filter;
struct list_head vlan_list;
};
-int efx_mcdi_filter_table_probe(struct efx_nic *efx);
+int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
void efx_mcdi_filter_table_remove(struct efx_nic *efx);
void efx_mcdi_filter_table_restore(struct efx_nic *efx);
+void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx);
+
/*
* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index dcfe78b0fa5a..962d8395d958 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -168,21 +168,18 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- struct efx_ef10_nic_data *nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc, i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
- nic_data = efx->nic_data;
-
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -276,7 +273,6 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
dma_addr_t dma_addr;
size_t inlen;
int rc;
@@ -295,7 +291,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ab5227b13ae6..b807871d8f69 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -722,11 +722,8 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
- }
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
NULL, 0, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b084e623b5f4..1afb58feb9ab 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -887,8 +887,10 @@ struct efx_async_filter_insertion {
* @rss_context: Main RSS context. Its @list member is the head of the list of
* RSS contexts created by user requests
* @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
* acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
@@ -1044,10 +1046,12 @@ struct efx_nic {
bool rx_scatter;
struct efx_rss_context rss_context;
struct mutex rss_lock;
+ u32 vport_id;
unsigned int_error_count;
unsigned long int_error_expire;
+ bool must_realloc_vis;
bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
@@ -1292,6 +1296,7 @@ struct efx_udp_tunnel {
* @udp_tnl_add_port: Add a UDP tunnel port
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @udp_tnl_del_port: Remove a UDP tunnel port
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1352,6 +1357,9 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
+ unsigned int (*check_caps)(const struct efx_nic *efx,
+ u8 flag,
+ u32 offset);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
void (*mcdi_request)(struct efx_nic *efx,
@@ -1462,6 +1470,8 @@ struct efx_nic_type {
int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+ size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+ size_t len);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 6670fda8f35a..8f73c5d996eb 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -360,10 +360,6 @@ enum {
* @warm_boot_count: Last seen MC warm boot count
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
- * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
- * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after
- * MC reboot
- * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
* @pio_write_base: Base address for writing PIO buffers
@@ -372,7 +368,6 @@ enum {
* @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
- * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
@@ -385,7 +380,6 @@ enum {
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
- * @vport_id: The function's vport ID, only relevant for PFs
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
@@ -404,16 +398,12 @@ struct efx_ef10_nic_data {
u16 warm_boot_count;
unsigned int vi_base;
unsigned int n_allocated_vis;
- bool must_realloc_vis;
- bool must_restore_rss_contexts;
- bool must_restore_filters;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
u16 piobuf_size;
bool must_restore_piobufs;
- bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
@@ -423,7 +413,6 @@ struct efx_ef10_nic_data {
u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
- unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
u8 port_id[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 59b4f16896a8..04c7283d205e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -352,12 +352,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
- (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
- ));
+ return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);
}
/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 260352d97d9d..c01916cff507 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -308,6 +308,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + rx_buf->len;
xdp.rxq = &rx_queue->xdp_rxq_info;
+ xdp.frame_sz = efx->rx_page_buf_step;
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
@@ -328,7 +329,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
case XDP_TX:
/* Buffer ownership passes to tx on success. */
- xdpf = convert_to_xdp_frame(&xdp);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index baa464161626..891e9fb6abec 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -948,6 +948,13 @@ fail:
#endif /* CONFIG_SFC_MTD */
+static unsigned int siena_check_caps(const struct efx_nic *efx,
+ u8 flag, u32 offset)
+{
+ /* Siena did not support MC_CMD_GET_CAPABILITIES */
+ return 0;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1086,4 +1093,5 @@ const struct efx_nic_type siena_a0_nic_type = {
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
.rx_hash_key_size = 16,
+ .check_caps = siena_check_caps,
};
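The check_caps hook added here gives callers a revision-independent way to test MCDI capability flags: EF10 parts can answer from their cached GET_CAPABILITIES response, while Siena's stub above reports every capability as absent. The efx_has_cap() wrapper used in the ptp.c hunk above presumably expands to such a call; a minimal sketch, assuming the FLAGS2 word offset constant from mcdi_pcol.h (illustrative only, not part of the patch):

	/* Returns true when the firmware reports MAC TX timestamping. On
	 * Siena the hook above always returns 0, so this correctly reads
	 * as "unsupported" there. */
	static bool example_mac_tx_ts_supported(const struct efx_nic *efx)
	{
		return efx->type->check_caps(efx,
			MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN,
			MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST) != 0;
	}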
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9e1c3752b200..4d2d91ec8b41 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -28,7 +28,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -44,7 +44,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a5a0fb60193a..328bc38848bb 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -867,7 +867,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
- struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
u32 ret;
if (unlikely(!xdpf))
@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- unsigned int len = xdp->data_end - xdp->data;
+ unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS;
+ struct page *page;
int err;
u32 act;
act = bpf_prog_run_xdp(prog, xdp);
+ /* Due to xdp_adjust_tail: the DMA sync for_device must cover the max length the CPU may have touched */
+ sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
+ sync = max(sync, len);
+
switch (act) {
case XDP_PASS:
ret = NETSEC_XDP_PASS;
break;
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
- if (ret != NETSEC_XDP_TX)
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ if (ret != NETSEC_XDP_TX) {
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
+ }
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len,
- true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
}
break;
default:
@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data), len, true);
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(dring->page_pool, page, sync, true);
break;
}
@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
u16 xdp_xmit = 0;
u32 xdp_act = 0;
int done = 0;
+ xdp.rxq = &dring->xdp_rxq;
+ xdp.frame_sz = PAGE_SIZE;
+
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
dma_dir = page_pool_get_dma_dir(dring->page_pool);
@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
- struct xdp_buff xdp;
void *buf_addr;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + pkt_len;
- xdp.rxq = &dring->xdp_rxq;
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
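The sync length computed in netsec_run_xdp() matters because a BPF program may call bpf_xdp_adjust_tail() and move data_end past the length captured on entry. A worked example with made-up numbers, assuming NETSEC_RXBUF_HEADROOM bytes of headroom: for a 64-byte frame grown to 128 bytes, len (taken before the program runs) is 64, the post-run distance data_end - data_hard_start - NETSEC_RXBUF_HEADROOM is 128, and max(sync, len) is 128, so the page_pool DMA sync covers everything the CPU touched:

	/* Illustrative recap of the computation in the hunk above */
	unsigned int sync, len;

	len = xdp->data_end - xdp->data;		/* 64, captured pre-run */
	act = bpf_prog_run_xdp(prog, xdp);		/* may grow data_end */
	sync = xdp->data_end - xdp->data_hard_start -
	       NETSEC_RXBUF_HEADROOM;			/* 128 after the grow */
	sync = max(sync, len);				/* sync the larger span */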
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 67ddf782d98a..f2638446b62e 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1394,7 +1394,7 @@ static int ave_stop(struct net_device *ndev)
return 0;
}
-static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ave_private *priv = netdev_priv(ndev);
u32 proc_idx, done_idx, ndesc, cmdsts;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index b46f8d2ae6d7..36bd2e18f23b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -196,6 +196,19 @@ config DWMAC_SUN8I
This selects Allwinner SoC glue layer support for the
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+
+config DWMAC_IMX8
+ tristate "NXP IMX8 DWMAC support"
+ default ARCH_MXC
+ depends on OF && (ARCH_MXC || COMPILE_TEST)
+ select MFD_SYSCON
+ ---help---
+ Support for the Ethernet controller on NXP i.MX8 SoCs.
+
+ This selects NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for the i.MX8 series, such as the
+ i.MX8MP/i.MX8DXL GMAC Ethernet controllers.
+
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5a6f265bc540..295615ab36a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -27,9 +27,10 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
-obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
-obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6208a68a331d..127f75862962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -473,6 +473,7 @@ struct mac_device_info {
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
+ unsigned int promisc;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
new file mode 100644
index 000000000000..5010af7dab4a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-imx.c - DWMAC Specific Glue layer for NXP imx8
+ *
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16)
+#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16)
+#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19)
+#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
+#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
+
+struct imx_dwmac_ops {
+ u32 addr_width;
+ bool mac_rgmii_txclk_auto_adj;
+
+ int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+struct imx_priv_data {
+ struct device *dev;
+ struct clk *clk_tx;
+ struct clk *clk_mem;
+ struct regmap *intf_regmap;
+ u32 intf_reg_off;
+ bool rmii_refclk_ext;
+
+ const struct imx_dwmac_ops *ops;
+ struct plat_stmmacenet_data *plat_dat;
+};
+
+static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct imx_priv_data *dwmac = plat_dat->bsp_priv;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = GPR_ENET_QOS_INTF_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = GPR_ENET_QOS_INTF_SEL_RMII;
+ val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = GPR_ENET_QOS_INTF_SEL_RGMII |
+ GPR_ENET_QOS_RGMII_EN;
+ break;
+ default:
+ pr_debug("imx dwmac doesn't support %d interface\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ val |= GPR_ENET_QOS_CLK_GEN_EN;
+ return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ GPR_ENET_QOS_INTF_MODE_MASK, val);
+}
+
+static int
+imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ int ret = 0;
+
+ /* TBD: depends on imx8dxl scu interfaces to be upstreamed */
+ return ret;
+}
+
+static int imx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ int ret;
+
+ plat_dat = dwmac->plat_dat;
+
+ ret = clk_prepare_enable(dwmac->clk_mem);
+ if (ret) {
+ dev_err(&pdev->dev, "mem clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret) {
+ dev_err(&pdev->dev, "tx clock enable failed\n");
+ goto clk_tx_en_failed;
+ }
+
+ if (dwmac->ops->set_intf_mode) {
+ ret = dwmac->ops->set_intf_mode(plat_dat);
+ if (ret)
+ goto intf_mode_failed;
+ }
+
+ return 0;
+
+intf_mode_failed:
+ clk_disable_unprepare(dwmac->clk_tx);
+clk_tx_en_failed:
+ clk_disable_unprepare(dwmac->clk_mem);
+ return ret;
+}
+
+static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct imx_priv_data *dwmac = priv;
+
+ if (dwmac->clk_tx)
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_mem);
+}
+
+static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ unsigned long rate;
+ int err;
+
+ plat_dat = dwmac->plat_dat;
+
+ if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ default:
+ dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ return;
+ }
+
+ err = clk_set_rate(dwmac->clk_tx, rate);
+ if (err < 0)
+ dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
+}
+
+static int
+imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+ if (of_get_property(np, "snps,rmii_refclk_ext", NULL))
+ dwmac->rmii_refclk_ext = true;
+
+ dwmac->clk_tx = devm_clk_get(dev, "tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "failed to get tx clock\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+
+ dwmac->clk_mem = NULL;
+ if (of_machine_is_compatible("fsl,imx8dxl")) {
+ dwmac->clk_mem = devm_clk_get(dev, "mem");
+ if (IS_ERR(dwmac->clk_mem)) {
+ dev_err(dev, "failed to get mem clock\n");
+ return PTR_ERR(dwmac->clk_mem);
+ }
+ }
+
+ if (of_machine_is_compatible("fsl,imx8mp")) {
+ /* The binding doc describes the "intf_mode" property:
+ it is required by i.MX8MP,
+ and optional for i.MX8DXL.
+ */
+ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ if (IS_ERR(dwmac->intf_regmap))
+ return PTR_ERR(dwmac->intf_regmap);
+
+ err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
+ if (err) {
+ dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int imx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct imx_priv_data *dwmac;
+ const struct imx_dwmac_ops *data;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto err_match_data;
+ }
+
+ dwmac->ops = data;
+ dwmac->dev = &pdev->dev;
+
+ ret = imx_dwmac_parse_dt(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse OF data\n");
+ goto err_parse_dt;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(dwmac->ops->addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA mask set failed\n");
+ goto err_dma_mask;
+ }
+
+ plat_dat->init = imx_dwmac_init;
+ plat_dat->exit = imx_dwmac_exit;
+ plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
+ plat_dat->bsp_priv = dwmac;
+ dwmac->plat_dat = plat_dat;
+
+ ret = imx_dwmac_init(pdev, dwmac);
+ if (ret)
+ goto err_dwmac_init;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_drv_probe;
+
+ return 0;
+
+err_dwmac_init:
+err_drv_probe:
+ imx_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_dma_mask:
+err_parse_dt:
+err_match_data:
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+}
+
+static struct imx_dwmac_ops imx8mp_dwmac_data = {
+ .addr_width = 34,
+ .mac_rgmii_txclk_auto_adj = false,
+ .set_intf_mode = imx8mp_set_intf_mode,
+};
+
+static struct imx_dwmac_ops imx8dxl_dwmac_data = {
+ .addr_width = 32,
+ .mac_rgmii_txclk_auto_adj = true,
+ .set_intf_mode = imx8dxl_set_intf_mode,
+};
+
+static const struct of_device_id imx_dwmac_match[] = {
+ { .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data },
+ { .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_dwmac_match);
+
+static struct platform_driver imx_dwmac_driver = {
+ .probe = imx_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "imx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = imx_dwmac_match,
+ },
+};
+module_platform_driver(imx_dwmac_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP imx8 DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 2e4aaedb93f5..2ac9dfb3462c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -83,13 +83,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack assertion */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -103,13 +99,9 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -123,14 +115,12 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
}
/* move power state to P0 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P0 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -159,14 +149,12 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
/* move power state to P3 */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PWR_ST_MASK;
data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* Check for P3 state */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -180,13 +168,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert clk_req */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_PLL_CLK;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for clk_ack de-assert */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -200,13 +184,9 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
/* de-assert lane reset */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR0);
-
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data &= ~SERDES_RST;
-
- mdiobus_write(priv->mii, serdes_phy_addr,
- SERDES_GCR0, data);
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
/* check for de-assert lane reset reflection */
data = serdes_status_poll(priv, serdes_phy_addr,
@@ -252,6 +232,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ int ret;
int i;
plat->clk_csr = 5;
@@ -324,7 +305,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
plat->stmmac_clk = NULL;
}
- clk_prepare_enable(plat->stmmac_clk);
+
+ ret = clk_prepare_enable(plat->stmmac_clk);
+ if (ret) {
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ return ret;
+ }
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -341,16 +327,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
@@ -366,7 +347,7 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_sgmii1g_info = {
.setup = ehl_sgmii_data,
};
@@ -380,7 +361,7 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
return ehl_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_rgmii1g_info = {
.setup = ehl_rgmii_data,
};
@@ -399,7 +380,7 @@ static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
.setup = ehl_pse0_rgmii1g_data,
};
@@ -412,7 +393,7 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse0_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
.setup = ehl_pse0_sgmii1g_data,
};
@@ -431,7 +412,7 @@ static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
.setup = ehl_pse1_rgmii1g_data,
};
@@ -444,23 +425,18 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
return ehl_pse1_common_data(pdev, plat);
}
-static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = {
+static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
.setup = ehl_pse1_sgmii1g_data,
};
static int tgl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- int ret;
-
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
- return 0;
+ return intel_mgbe_common_data(pdev, plat);
}
static int tgl_sgmii_data(struct pci_dev *pdev,
@@ -474,7 +450,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
return tgl_common_data(pdev, plat);
}
-static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
@@ -577,7 +553,7 @@ static int quark_default_data(struct pci_dev *pdev,
return 0;
}
-static const struct stmmac_pci_info quark_pci_info = {
+static const struct stmmac_pci_info quark_info = {
.setup = quark_default_data,
};
@@ -600,11 +576,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
struct intel_priv_data *intel_priv;
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- int i;
int ret;
- intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv),
- GFP_KERNEL);
+ intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
if (!intel_priv)
return -ENOMEM;
@@ -631,15 +605,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
return ret;
}
- /* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
- break;
- }
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
pci_set_master(pdev);
@@ -650,14 +618,23 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- pci_enable_msi(pdev);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
- res.wol_irq = pdev->irq;
- res.irq = pdev->irq;
+ res.addr = pcim_iomap_table(pdev)[0];
+ res.wol_irq = pci_irq_vector(pdev, 0);
+ res.irq = pci_irq_vector(pdev, 0);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret) {
+ pci_free_irq_vectors(pdev);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ }
- return stmmac_dvr_probe(&pdev->dev, plat, &res);
+ return ret;
}
/**
@@ -671,19 +648,15 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+ pci_free_irq_vectors(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
+ pcim_iounmap_regions(pdev, BIT(0));
pci_disable_device(pdev);
}
@@ -742,26 +715,19 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
- { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID,
- &ehl_pse0_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID,
- &ehl_pse0_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID,
- &ehl_pse1_rgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID,
- &ehl_pse1_sgmii1g_pci_info) },
- { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID, &ehl_pse0_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID, &ehl_pse1_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
{}
};
-
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
static struct pci_driver intel_eth_pci_driver = {
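The probe rework above trades pci_enable_msi() for pci_alloc_irq_vectors(), which covers MSI-X, MSI and legacy INTx in one call and pairs with pci_free_irq_vectors() on the error and remove paths. A minimal sketch of the pattern for a single vector (illustrative only):

	static int example_request_one_vector(struct pci_dev *pdev, int *irq)
	{
		int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

		if (ret < 0)
			return ret;		/* no vector of any type */
		*irq = pci_irq_vector(pdev, 0);	/* valid whichever type was granted */
		return 0;
	}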
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index a3934ca6a043..234e8b6816ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -32,7 +33,10 @@
#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
-#define PRG_ETH0_TXDLY_SHIFT 5
+/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
+ * cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
/* divider for the result of m250_sel */
@@ -44,6 +48,27 @@
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+/* Bypass (= 0, the signal from the GPIO input directly connects to the
+ * internal sampling) or enable (= 1) the internal logic for RXEN and RXD[3:0]
+ * timing tuning.
+ */
+#define PRG_ETH0_ADJ_ENABLE BIT(13)
+/* Controls whether the RXEN and RXD[3:0] signals should be aligned with the
+ * input RX rising/falling edge and sent to the Ethernet internals. This
+ * sets the delay and skew automatically (internally).
+ */
+#define PRG_ETH0_ADJ_SETUP BIT(14)
+/* An internal counter based on the "timing-adjustment" clock. The counter is
+ * cleared on both the falling and rising edges of RX_CLK. This selects the
+ * delay (= the counter value) after which to start sampling RXEN and RXD[3:0].
+ */
+#define PRG_ETH0_ADJ_DELAY GENMASK(19, 15)
+/* Adjusts the skew between each bit of RXEN and RXD[3:0]. If a signal has a
+ * large input delay, the bit for that signal (RXEN = bit 0, RXD[3] = bit 1,
+ * ...) can be configured to be 1 to compensate for a delay of about 1ns.
+ */
+#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+
#define MUX_CLK_NUM_PARENTS 2
struct meson8b_dwmac;
@@ -60,6 +85,8 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
+ u32 rx_delay_ns;
+ struct clk *timing_adj_clk;
};
struct meson8b_dwmac_clk_configs {
@@ -240,30 +267,82 @@ static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
return 0;
}
+static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
+ struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+
+ return 0;
+}
+
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
+ u32 tx_dly_config, rx_dly_config, delay_config;
int ret;
- u8 tx_dly_val = 0;
+
+ tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
+ dwmac->tx_delay_ns >> 1);
+
+ if (dwmac->rx_delay_ns == 2)
+ rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
+ else
+ rx_dly_config = 0;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ delay_config = tx_dly_config | rx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
- * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
- * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
- */
- tx_dly_val = dwmac->tx_delay_ns >> 1;
- /* fall through */
-
- case PHY_INTERFACE_MODE_RGMII_ID:
+ delay_config = tx_dly_config;
+ break;
case PHY_INTERFACE_MODE_RGMII_TXID:
+ delay_config = rx_dly_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_config = 0;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+ return -EINVAL;
+ }
+
+ /* The timing adjustment logic is driven by a separate clock */
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->timing_adj_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "Failed to enable the timing-adjustment clock\n");
+ return ret;
+ }
+ }
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK |
+ PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP |
+ PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
+ delay_config);
+
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
-
/* Configure the 125MHz RGMII TX clock, the IP block changes
* the output automatically (= without us having to configure
* a register) based on the line-speed (125MHz for Gbit speeds,
@@ -276,34 +355,18 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return ret;
}
- ret = clk_prepare_enable(dwmac->rgmii_tx_clk);
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->rgmii_tx_clk);
if (ret) {
dev_err(dwmac->dev,
"failed to enable the RGMII TX clock\n");
return ret;
}
-
- devm_add_action_or_reset(dwmac->dev,
- (void(*)(void *))clk_disable_unprepare,
- dwmac->rgmii_tx_clk);
- break;
-
- case PHY_INTERFACE_MODE_RMII:
+ } else {
/* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK,
PRG_ETH0_INVERTED_RMII_CLK);
-
- /* TX clock delay cannot be configured in RMII mode */
- meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- 0);
-
- break;
-
- default:
- dev_err(dwmac->dev, "unsupported phy-mode %s\n",
- phy_modes(dwmac->phy_mode));
- return -EINVAL;
}
/* enable TX_CLK and PHY_REF_CLK generator */
@@ -358,6 +421,25 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
+ /* use 0ns as fallback since this is what most boards actually use */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ns))
+ dwmac->rx_delay_ns = 0;
+
+ if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
+ dev_err(&pdev->dev,
+ "The only allowed RX delays values are: 0ns, 2ns");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
+ "timing-adjustment");
+ if (IS_ERR(dwmac->timing_adj_clk)) {
+ ret = PTR_ERR(dwmac->timing_adj_clk);
+ goto err_remove_config_dt;
+ }
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
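The TX delay encoding is linear, register value = tx_delay_ns / 2, matching the "8ns / 4 * tx_dly_val" comment now attached to PRG_ETH0_TXDLY_MASK. A worked sketch of the FIELD_PREP step (illustrative only):

	/* tx_delay_ns = 2 (the DT fallback): 2 >> 1 = 0x1, i.e. 2ns of delay.
	 * PRG_ETH0_TXDLY_MASK is GENMASK(6, 5), so the value lands at bit 5:
	 * FIELD_PREP(GENMASK(6, 5), 0x1) == 0x20.
	 */
	u32 tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK, 2 >> 1);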
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index b2dc99289687..5d4df4c5254e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -29,6 +29,11 @@
#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+/* CLOCK feed to PHY*/
+#define ETH_CK_F_25M 25000000
+#define ETH_CK_F_50M 50000000
+#define ETH_CK_F_125M 125000000
+
/* Ethernet PHY interface selection in register SYSCFG Configuration
*------------------------------------------
* src |BIT(23)| BIT(22)| BIT(21)|BIT(20)|
@@ -58,33 +63,20 @@
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
*| GMII | - | eth-ck | n/a | n/a |
- *| | | | | |
+ *| | | st,ext-phyclk | | |
* ---------------------------------------------------------------------------
- *| RGMII | - | eth-ck | n/a | eth-ck (no pin) |
- *| | | | | st,eth-clk-sel |
+ *| RGMII | - | eth-ck | n/a | eth-ck |
+ *| | | st,ext-phyclk | | st,eth-clk-sel or|
+ *| | | | | st,ext-phyclk |
* ---------------------------------------------------------------------------
*| RMII | - | eth-ck | eth-ck | n/a |
- *| | | | st,eth-ref-clk-sel | |
+ *| | | st,ext-phyclk | st,eth-ref-clk-sel | |
+ *| | | | or st,ext-phyclk | |
* ---------------------------------------------------------------------------
*
- * BIT(17) : set this bit in RMII mode when you have PHY without crystal 50MHz
- * BIT(16) : set this bit in GMII/RGMII PHY when you do not want use 125Mhz
- * from PHY
- *-----------------------------------------------------
- * src | BIT(17) | BIT(16) |
- *-----------------------------------------------------
- * MII | n/a | n/a |
- *-----------------------------------------------------
- * GMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RGMII | n/a | st,eth-clk-sel |
- *-----------------------------------------------------
- * RMII | st,eth-ref-clk-sel | n/a |
- *-----------------------------------------------------
- *
*/
struct stm32_dwmac {
@@ -93,6 +85,8 @@ struct stm32_dwmac {
struct clk *clk_eth_ck;
struct clk *clk_ethstp;
struct clk *syscfg_clk;
+ int ext_phyclk;
+ int enable_eth_ck;
int eth_clk_sel_reg;
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
@@ -155,14 +149,17 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
+ if (dwmac->enable_eth_ck) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
+ }
}
} else {
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
return ret;
}
@@ -170,24 +167,34 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg;
+ u32 reg = dwmac->mode_reg, clk_rate;
int val;
+ clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+ dwmac->enable_eth_ck = false;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
+ if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
+ dwmac->enable_eth_ck = true;
val = SYSCFG_PMCR_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (dwmac->eth_clk_sel_reg)
+ if (clk_rate == ETH_CK_F_25M &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if (dwmac->eth_ref_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
+ (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -195,8 +202,11 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if (dwmac->eth_clk_sel_reg)
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
@@ -294,6 +304,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
struct device_node *np = dev->of_node;
int err = 0;
+ /* The Ethernet PHY has no crystal */
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
@@ -431,7 +444,8 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->syscfg_clk);
- clk_disable_unprepare(dwmac->clk_eth_ck);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
return ret;
}
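Every interface case in stm32mp1_set_mode() now applies the same rule: eth-ck is gated with the MAC only when its rate matches what that PHY mode needs and either the matching clk-sel DT flag or st,ext-phyclk is set; the enable_eth_ck flag then keeps the suspend and clk_prepare paths from unbalancing a clock that was never enabled. A condensed sketch of the RMII case (illustrative only):

	clk_rate = clk_get_rate(dwmac->clk_eth_ck);	/* 25MHz or 50MHz for RMII */
	if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
	    (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
		dwmac->enable_eth_ck = true;		/* gate eth-ck with the MAC */
		val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;	/* feed the PHY from eth-ck */
	}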
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 28cac28253b8..61f3249bd724 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -90,6 +90,7 @@
#define GMAC_VLAN_CSVL BIT(19)
#define GMAC_VLAN_VLC GENMASK(17, 16)
#define GMAC_VLAN_VLC_SHIFT 16
+#define GMAC_VLAN_VLHT GENMASK(15, 0)
/* MAC VLAN Tag */
#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 39692d15d80c..ecd834e0e121 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -450,6 +450,12 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
if (vid > 4095)
return -EINVAL;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Adding VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
/* For single VLAN filter, VID 0 means VLAN promiscuous */
@@ -499,6 +505,12 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
{
int i, ret = 0;
+ if (hw->promisc) {
+ netdev_err(dev,
+ "Deleting VLAN in promisc mode not supported\n");
+ return -EPERM;
+ }
+
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@@ -523,9 +535,45 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
return ret;
}
+static void dwmac4_vlan_promisc_enable(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, 0);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ if (value & GMAC_VLAN_VTHM) {
+ value &= ~GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
+ }
+}
+
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
struct mac_device_info *hw)
{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
u32 val;
int i;
@@ -542,6 +590,13 @@ static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
dwmac4_write_vlan_filter(dev, hw, i, val);
}
}
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ value |= GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
}
static void dwmac4_set_filter(struct mac_device_info *hw,
@@ -624,6 +679,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_VTFE;
writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!hw->promisc) {
+ hw->promisc = 1;
+ dwmac4_vlan_promisc_enable(dev, hw);
+ }
+ } else {
+ if (hw->promisc) {
+ hw->promisc = 0;
+ dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
+ }
+ }
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
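The promisc handling added to dwmac4_set_filter() is a small state machine: on entering IFF_PROMISC the driver bypasses VLAN filtering (VID 0 for the single-filter case; otherwise clearing the per-entry VEN bits and the VTHM hash-match bit) and remembers the state in hw->promisc; on leaving, dwmac4_restore_hw_vlan_rx_fltr() re-arms everything from the vlan_filter[] shadow. Adding or deleting VLAN filters is refused with -EPERM while the bypass is active, since any change would be silently lost. Sketch of the hash-match toggle (illustrative only):

	value = readl(ioaddr + GMAC_VLAN_TAG);
	value &= ~GMAC_VLAN_VTHM;		/* stop hash-matching VLAN tags */
	writel(value, ioaddr + GMAC_VLAN_TAG);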
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7e9cbfd23530..73677c3b33b6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3544,15 +3544,6 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
}
-
-static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
-{
- if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
- return 0;
-
- return 1;
-}
-
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3fb21f7ac9fb..272cb47af9f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -217,15 +217,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = dev_get_drvdata(&pdev->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
- if (priv->plat->stmmac_clk)
- clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcda49dcf619..f32317fa75c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -507,7 +507,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a")) {
+ of_device_is_compatible(np, "snps,dwmac-4.20a") ||
+ of_device_is_compatible(np, "snps,dwmac-5.10a")) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f1c8615ab6f0..debd3c3fa6fb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -237,12 +237,6 @@ static inline void cas_lock_tx(struct cas *cp)
spin_lock_nested(&cp->tx_lock[i], i);
}
-static inline void cas_lock_all(struct cas *cp)
-{
- spin_lock_irq(&cp->lock);
- cas_lock_tx(cp);
-}
-
/* WTZ: QA was finding deadlock problems with the previous
* versions after long test runs with multiple cards per machine.
* See if replacing cas_lock_all with safer versions helps. The
@@ -266,12 +260,6 @@ static inline void cas_unlock_tx(struct cas *cp)
spin_unlock(&cp->tx_lock[i - 1]);
}
-static inline void cas_unlock_all(struct cas *cp)
-{
- cas_unlock_tx(cp);
- spin_unlock_irq(&cp->lock);
-}
-
#define cas_unlock_all_restore(cp, flags) \
do { \
struct cas *xxxcp = (cp); \
@@ -5059,7 +5047,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cp->cas_flags & CAS_FLAG_SATURN)
cas_saturn_firmware_init(cp);
- cp->init_block = (struct cas_init_block *)
+ cp->init_block =
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 40a2ce0ca808..e28727297563 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1362,18 +1362,6 @@ static void print_rxfd(struct rxf_desc *rxfd)
* As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
*/
-/*************************************************************************
- * Tx DB *
- *************************************************************************/
-static inline int bdx_tx_db_size(struct txdb *db)
-{
- int taken = db->wptr - db->rptr;
- if (taken < 0)
- taken = db->size + 1 + taken; /* (size + 1) equals memsz */
-
- return db->size - taken;
-}
-
/**
* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
* @db: tx data base
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 62f809b67469..182d10f171f6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
@@ -104,6 +105,28 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPTS
+ tristate "TI K3 AM65x CPTS"
+ depends on ARCH_K3 && OF
+ depends on PTP_1588_CLOCK
+ help
+ Say y here to support the TI K3 AM65x CPTS with 1588 features such as
+ PTP hardware clock for each CPTS device and network packets
+ timestamping where applicable.
+ Depending on integration CPTS blocks enable compliance with
+ the IEEE 1588-2008 standard for a precision clock synchronization
+ protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
+ and PCIe Subsystem Precision Time Measurement (PTM).
+
+config TI_AM65_CPSW_TAS
+ bool "Enable TAS offload in AM65 CPSW"
+ depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
+ help
+ Say y here to support Time Aware Shaper (TAS) offload in AM65 CPSW.
+ AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
+ defined in IEEE 802.1Q-2018. The EST scheduler runs on CPTS and the
+ TAS/EST schedule is updated in the Fetch RAM of the CPSW.
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
@@ -133,7 +156,7 @@ config TLAN
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/device_drivers/ti/tlan.txt>
+ <file:Documentation/networking/device_drivers/ti/tlan.rst>
for more details.
To compile this driver as a module, choose M here. The module
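The "depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS" line added to TI_K3_AM65_CPSW_NUSS above is the standard Kconfig idiom for an optional tristate dependency: it forbids only CPSW built-in (y) with CPTS modular (m), where the built-in driver could not reach the module's symbols; y/y, m/m, m/y and CPTS disabled all remain valid. The driver then guards the optional calls at compile time, as the ethtool hunk below does (sketch mirroring that patch):

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return ethtool_op_get_ts_info(ndev, info);	/* SW timestamps only */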
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index cb26a9d21869..6e779292545d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c3502aa15ea0..8c4690f3ebcb 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -12,6 +12,7 @@
#include "am65-cpsw-nuss.h"
#include "cpsw_ale.h"
+#include "am65-cpts.h"
#define AM65_CPSW_REGDUMP_VER 0x1
@@ -694,6 +695,27 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = am65_cpts_phc_index(common->cpts);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
@@ -708,9 +730,17 @@ static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int rrobin;
+
+ rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+
+ if (common->est_enabled && rrobin) {
+ netdev_err(ndev,
+ "p0-rx-ptype-rrobin flag conflicts with QOS\n");
+ return -EINVAL;
+ }
- common->pf_p0_rx_ptype_rrobin =
- !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ common->pf_p0_rx_ptype_rrobin = rrobin;
am65_cpsw_nuss_set_p0_ptype(common);
return 0;
@@ -730,7 +760,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
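With get_ts_info now reporting hardware timestamping and the hwtstamp_set handler below accepting HWTSTAMP_TX_ON and the listed RX filters, userspace can switch the port to hardware timestamps through the standard SIOCSHWTSTAMP ioctl. A minimal userspace sketch, assuming an open AF_INET socket fd and the interface name "eth0" (both assumptions, illustrative only):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int enable_hw_timestamps(int fd)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;		/* accepted by the handler below */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* matches the advertised filters */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}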
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 88f52a2f85b3..87a4775ed53a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -30,18 +30,21 @@
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
+#include "am65-cpts.h"
#define AM65_CPSW_SS_BASE 0x0
#define AM65_CPSW_SGMII_BASE 0x100
#define AM65_CPSW_XGMII_BASE 0x2100
#define AM65_CPSW_CPSW_NU_BASE 0x20000
#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_FRAM_BASE 0x12000
#define AM65_CPSW_NU_STATS_BASE 0x1a000
#define AM65_CPSW_NU_ALE_BASE 0x1e000
#define AM65_CPSW_NU_CPTS_BASE 0x1d000
#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
#define AM65_CPSW_MAX_PORTS 8
@@ -187,9 +190,11 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ am65_cpsw_qos_link_up(ndev, phy->speed);
netif_tx_wake_all_queues(ndev);
} else {
int tmo;
+
/* disable forwarding */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -203,6 +208,7 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
cpsw_sl_ctl_reset(port->slave.mac_sl);
+ am65_cpsw_qos_link_down(ndev);
netif_tx_stop_all_queues(ndev);
}
@@ -668,6 +674,18 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ ns = ((u64)psdata[1] << 32) | psdata[0];
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
@@ -745,6 +763,9 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb->dev = ndev;
psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* add RX timestamp */
+ if (port->rx_ts_enabled)
+ am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -904,6 +925,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
ndev = skb->dev;
+ am65_cpts_tx_timestamp(common->cpts, skb);
+
ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -995,6 +1018,10 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
/* padding enabled in hw */
pkt_len = skb_headlen(skb);
+ /* SKB TX timestamp */
+ if (port->tx_ts_enabled)
+ am65_cpts_prep_tx_timestamp(common->cpts, skb);
+
q_idx = skb_get_queue_mapping(skb);
dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
@@ -1158,6 +1185,111 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
return 0;
}
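+
+/* SIOCSHWTSTAMP handler; reached, for example, via ptp4l or
+ * "hwstamp_ctl -i eth0 -t 1 -r 1"
+ */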
+static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->rx_ts_enabled = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->rx_ts_enabled = true;
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+
+ /* cfg TX timestamp */
+ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
+ AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
+
+ ts_vlan_ltype = ETH_P_8021Q;
+
+ ts_ctrl_ltype2 = ETH_P_1588 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
+
+ ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
+ AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
+
+ if (port->tx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+
+ writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
+ writel(ts_vlan_ltype, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
+ writel(ts_ctrl_ltype2, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
+ writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
+
+ /* en/dis RX timestamp */
+ am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = port->tx_ts_enabled ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = port->rx_ts_enabled ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
@@ -1166,6 +1298,13 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_set(ndev, req);
+ case SIOCGHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ }
+
if (!port->slave.phy)
return -EOPNOTSUPP;
@@ -1244,6 +1383,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+ .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1531,6 +1671,40 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
return 0;
}
+static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *node;
+ struct am65_cpts *cpts;
+ void __iomem *reg_base;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return 0;
+
+ node = of_get_child_by_name(dev->of_node, "cpts");
+ if (!node) {
+ dev_err(dev, "%s cpts not found\n", __func__);
+ return -ENOENT;
+ }
+
+ reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
+ cpts = am65_cpts_create(dev, reg_base, node);
+ if (IS_ERR(cpts)) {
+ int ret = PTR_ERR(cpts);
+
+ if (ret == -EOPNOTSUPP) {
+ dev_info(dev, "cpts disabled\n");
+ return 0;
+ }
+
+ dev_err(dev, "cpts create err %d\n", ret);
+ return ret;
+ }
+ common->cpts = cpts;
+
+ return 0;
+}
+
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
struct device_node *node, *port_np;
@@ -1571,6 +1745,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
+ port->fetch_ram_base =
+ common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
+ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->disabled = !of_device_is_available(port_np);
if (port->disabled)
@@ -1863,10 +2040,21 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- /* We do not want to force this, as in some cases may not have child */
- if (ret)
- dev_warn(dev, "populating child nodes err:%d\n", ret);
+ node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!node) {
+ dev_warn(dev, "MDIO node not found\n");
+ } else if (of_device_is_available(node)) {
+ struct platform_device *mdio_pdev;
+
+ mdio_pdev = of_platform_device_create(node, NULL, dev);
+ if (!mdio_pdev) {
+ ret = -ENODEV;
+ goto err_pm_clear;
+ }
+
+ common->mdio_dev = &mdio_pdev->dev;
+ }
+ of_node_put(node);
am65_cpsw_nuss_get_ver(common);
@@ -1901,6 +2089,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
+ ret = am65_cpsw_init_cpts(common);
+ if (ret)
+ goto err_of_clear;
+
/* init ports */
for (i = 0; i < common->port_num; i++)
am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
@@ -1919,7 +2111,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return 0;
err_of_clear:
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
+err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
@@ -1944,7 +2137,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
- of_platform_depopulate(dev);
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 41ae5b4c7931..9faf4fb1409b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -9,6 +9,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include "am65-cpsw-qos.h"
+
+struct am65_cpts;
#define HOST_PORT_NUM 0
@@ -35,8 +40,12 @@ struct am65_cpsw_port {
u32 port_id;
void __iomem *port_base;
void __iomem *stat_base;
+ void __iomem *fetch_ram_base;
bool disabled;
struct am65_cpsw_slave_data slave;
+ bool tx_ts_enabled;
+ bool rx_ts_enabled;
+ struct am65_cpsw_qos qos;
};
struct am65_cpsw_host {
@@ -72,6 +81,7 @@ struct am65_cpsw_pdata {
struct am65_cpsw_common {
struct device *dev;
+ struct device *mdio_dev;
const struct am65_cpsw_pdata *pdata;
void __iomem *ss_base;
@@ -96,8 +106,9 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
-
bool pf_p0_rx_ptype_rrobin;
+ struct am65_cpts *cpts;
+ int est_enabled;
};
struct am65_cpsw_ndev_stats {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
new file mode 100644
index 000000000000..32eac04468bb
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This quality of service module implements:
+ * Enhancements for Scheduled Traffic (EST - P802.1Qbv/D2.2)
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/time.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+enum timer_act {
+ TACT_PROG, /* need to program the timer */
+ TACT_NEED_STOP, /* need to stop it first */
+ TACT_SKIP_PROG, /* only the buffer needs updating */
+};
+
+static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
+{
+ return port->qos.est_oper || port->qos.est_admin;
+}
+
+static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
+{
+ u32 val;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (enable)
+ val |= AM65_CPSW_CTL_EST_EN;
+ else
+ val &= ~AM65_CPSW_CTL_EST_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->est_enabled = enable;
+}
+
+static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+}
+
+/* target new EST RAM buffer, actual toggle happens after cycle completion */
+static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
+ int buf_num)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ if (buf_num)
+ val |= AM65_CPSW_PN_EST_BUFSEL;
+ else
+ val &= ~AM65_CPSW_PN_EST_BUFSEL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+}
+
+/* am65_cpsw_port_est_is_swapped() - Indicate whether the h/w has
+ * transitioned admin -> oper
+ *
+ * Return true if the transition has already happened, i.e. oper equals
+ * admin and the buffer numbers match (est_oper->buf equals
+ * est_admin->buf). Return false if the transition is still pending,
+ * i.e. oper differs from admin because a previous admin command is
+ * waiting to be transitioned to the oper state, and est_oper->buf does
+ * not match est_admin->buf.
+ */
+static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
+ int *admin)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
+ *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
+
+ return *admin == *oper;
+}
+
+/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
+ * admin to program the new schedule.
+ *
+ * The logic is as follows: if oper is the same as admin, return the
+ * other buffer (!oper) as the admin buffer. If they differ, the driver
+ * lets the current oper continue, since it is in the middle of an
+ * admin -> oper transition, and keeps it by selecting the same oper
+ * buffer via the EST_BUFSEL bit in the EST CTL register. On the second
+ * iteration they will match and the code returns. The actual buffer to
+ * write commands to is selected later, just before the schedule is
+ * updated.
+ */
+static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
+{
+ int oper, admin;
+ int roll = 2;
+
+ while (roll--) {
+ if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return !oper;
+
+ /* the swap has not happened yet, so hold off the transition
+ * by targeting the same oper buffer; in-flight RAM must not
+ * be touched
+ */
+ am65_cpsw_port_est_assign_buf_num(ndev, oper);
+
+ dev_info(&ndev->dev,
+ "Prev. EST admin cycle is in transit %d -> %d\n",
+ oper, admin);
+ }
+
+ return admin;
+}
+
+static void am65_cpsw_admin_to_oper(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = port->qos.est_admin;
+ port->qos.est_admin = NULL;
+}
+
+static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ val &= ~AM65_CPSW_PN_EST_ONEBUF;
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+
+ est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
+
+ /* a rolled buf num means the buffer changed while configuring */
+ if (port->qos.est_oper && port->qos.est_admin &&
+ est_new->buf == port->qos.est_oper->buf)
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+static void am65_cpsw_est_set(struct net_device *ndev, int enable)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ int common_enable = 0;
+ int i;
+
+ am65_cpsw_port_est_enable(port, enable);
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
+
+ common_enable |= enable;
+ am65_cpsw_est_enable(common, common_enable);
+}
+
+/* Call this in any routine before reading the real state of the
+ * admin -> oper transition; in particular, it is meant for generic
+ * routines that report the real state to the taprio qdisc.
+ */
+static void am65_cpsw_est_update_state(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int oper, admin;
+
+ if (!port->qos.est_admin)
+ return;
+
+ if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return;
+
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+/* The fetch command count is a number of bytes in Gigabit mode, or of
+ * nibbles in 10/100Mb mode. So, given the link speed and a time in ns,
+ * convert the ns into the number of bytes/nibbles that can be sent at
+ * that speed in that time.
+ */
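+/* e.g. 10000 ns at 1000 Mbps is DIV_ROUND_UP(10000 * 1000, 8000) = 1250
+ * byte times; the same 10000 ns at 100 Mbps is 250 nibble times
+ */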
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
+
+static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
+ int fetch_cnt,
+ int fetch_allow)
+{
+ u32 prio_mask, cmd_fetch_cnt, cmd;
+
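+ /* each fetch RAM command word is (count << 8) | priority mask,
+ * e.g. fetch_cnt = 0x100 and fetch_allow = 0x0f give cmd 0x1000f
+ */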
+ do {
+ if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
+ fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
+ cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
+ } else {
+ cmd_fetch_cnt = fetch_cnt;
+ /* fetch count can't be less than 16? */
+ if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
+ cmd_fetch_cnt = 16;
+
+ fetch_cnt = 0;
+ }
+
+ prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
+ cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
+
+ writel(cmd, addr);
+ addr += 4;
+ } while (fetch_cnt);
+
+ return addr;
+}
+
+static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio,
+ int link_speed)
+{
+ int i, cmd_cnt, cmd_sum = 0;
+ u32 fetch_cnt;
+
+ for (i = 0; i < taprio->num_entries; i++) {
+ if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
+ dev_err(&ndev->dev, "Only SET command is supported");
+ return -EINVAL;
+ }
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
+ link_speed);
+
+ cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
+ if (!cmd_cnt)
+ cmd_cnt++;
+
+ cmd_sum += cmd_cnt;
+
+ if (!fetch_cnt)
+ break;
+ }
+
+ return cmd_sum;
+}
+
+static int am65_cpsw_est_check_scheds(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int cmd_num;
+
+ cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
+ port->qos.link_speed);
+ if (cmd_num < 0)
+ return cmd_num;
+
+ if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
+ dev_err(&ndev->dev, "No fetch RAM");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
+ void __iomem *ram_addr, *max_ram_addr;
+ struct tc_taprio_sched_entry *entry;
+ int i, ram_size;
+
+ ram_addr = port->fetch_ram_base;
+ ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
+ ram_addr += est_new->buf * ram_size;
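+ /* the fetch RAM holds two schedule buffers; buf selects which
+ * half receives the new admin schedule
+ */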
+
+ max_ram_addr = ram_size + ram_addr;
+ for (i = 0; i < est_new->taprio.num_entries; i++) {
+ entry = &est_new->taprio.entries[i];
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
+ port->qos.link_speed);
+ fetch_allow = entry->gate_mask;
+ if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
+ dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
+ fetch_allow);
+
+ ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
+ fetch_allow);
+
+ if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
+ dev_info(&ndev->dev,
+ "next scheds after %d have no impact", i + 1);
+ break;
+ }
+
+ all_fetch_allow |= fetch_allow;
+ }
+
+ /* terminating cmd: enable the non-timed queues in case the cycle
+ * time is overrun
+ */
+ if (ram_addr < max_ram_addr)
+ writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
+}
+
+/* Enable ESTf periodic output, set the cycle start time and interval. */
+static int am65_cpsw_timer_set(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpts *cpts = common->cpts;
+ struct am65_cpts_estf_cfg cfg;
+
+ cfg.ns_period = est_new->taprio.cycle_time;
+ cfg.ns_start = est_new->taprio.base_time;
+
+ return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
+}
+
+static void am65_cpsw_timer_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+
+ am65_cpts_estf_disable(cpts, port->port_id - 1);
+}
+
+static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+ u64 cur_time;
+ s64 diff;
+
+ if (!port->qos.est_oper)
+ return TACT_PROG;
+
+ taprio_new = &est_new->taprio;
+ taprio_oper = &port->qos.est_oper->taprio;
+
+ if (taprio_new->cycle_time != taprio_oper->cycle_time)
+ return TACT_NEED_STOP;
+
+ /* to avoid a timer reset, get base_time from the oper taprio */
+ if (!taprio_new->base_time && taprio_oper)
+ taprio_new->base_time = taprio_oper->base_time;
+
+ if (taprio_new->base_time == taprio_oper->base_time)
+ return TACT_SKIP_PROG;
+
+ /* base times are cycle synchronized */
+ diff = taprio_new->base_time - taprio_oper->base_time;
+ diff = diff < 0 ? -diff : diff;
+ if (diff % taprio_new->cycle_time)
+ return TACT_NEED_STOP;
+
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
+ return TACT_SKIP_PROG;
+
+ /* TODO: Admin schedule at future time is not currently supported */
+ return TACT_NEED_STOP;
+}
+
+static void am65_cpsw_stop_est(struct net_device *ndev)
+{
+ am65_cpsw_est_set(ndev, 0);
+ am65_cpsw_timer_stop(ndev);
+}
+
+static void am65_cpsw_purge_est(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ am65_cpsw_stop_est(ndev);
+
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ if (port->qos.est_oper)
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = NULL;
+ port->qos.est_admin = NULL;
+}
+
+static int am65_cpsw_configure_taprio(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpts *cpts = common->cpts;
+ int ret = 0, tact = TACT_PROG;
+
+ am65_cpsw_est_update_state(ndev);
+
+ if (!est_new->taprio.enable) {
+ am65_cpsw_stop_est(ndev);
+ return ret;
+ }
+
+ ret = am65_cpsw_est_check_scheds(ndev, est_new);
+ if (ret < 0)
+ return ret;
+
+ tact = am65_cpsw_timer_act(ndev, est_new);
+ if (tact == TACT_NEED_STOP) {
+ dev_err(&ndev->dev,
+ "Can't toggle estf timer, stop taprio first");
+ return -EINVAL;
+ }
+
+ if (tact == TACT_PROG)
+ am65_cpsw_timer_stop(ndev);
+
+ if (!est_new->taprio.base_time)
+ est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
+
+ am65_cpsw_port_est_get_buf_num(ndev, est_new);
+ am65_cpsw_est_set_sched_list(ndev, est_new);
+ am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+
+ am65_cpsw_est_set(ndev, est_new->taprio.enable);
+
+ if (tact == TACT_PROG) {
+ ret = am65_cpsw_timer_set(ndev, est_new);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to set cycle time");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct am65_cpsw_est *est_new;
+ size_t size;
+ int ret = 0;
+
+ if (taprio->cycle_time_extension) {
+ dev_err(&ndev->dev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries +
+ sizeof(struct am65_cpsw_est);
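+ /* taprio, with its flexible entries[] array, must stay the last
+ * member of struct am65_cpsw_est so one allocation covers both
+ */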
+
+ est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+ ret = am65_cpsw_configure_taprio(ndev, est_new);
+ if (!ret) {
+ if (taprio->enable) {
+ if (port->qos.est_admin)
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ port->qos.est_admin = est_new;
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ am65_cpsw_purge_est(ndev);
+ }
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ ktime_t cur_time;
+ s64 delta;
+
+ port->qos.link_speed = link_speed;
+ if (!am65_cpsw_port_est_enabled(port))
+ return;
+
+ if (port->qos.link_down_time) {
+ cur_time = ktime_get();
+ delta = ktime_us_delta(cur_time, port->qos.link_down_time);
+ if (delta > USEC_PER_SEC) {
+ dev_err(&ndev->dev,
+ "Link has been lost too long, stopping TAS");
+ goto purge_est;
+ }
+ }
+
+ return;
+
+purge_est:
+ am65_cpsw_purge_est(ndev);
+}
+
+static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -ENODEV;
+
+ if (!netif_running(ndev)) {
+ dev_err(&ndev->dev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(&ndev->dev,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
+ return -EINVAL;
+ }
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ return am65_cpsw_set_taprio(ndev, type_data);
+}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
new file mode 100644
index 000000000000..e8f1b6b59e93
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef AM65_CPSW_QOS_H_
+#define AM65_CPSW_QOS_H_
+
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+struct am65_cpsw_est {
+ int buf;
+ /* has to be the last one */
+ struct tc_taprio_qopt_offload taprio;
+};
+
+struct am65_cpsw_qos {
+ struct am65_cpsw_est *est_admin;
+ struct am65_cpsw_est *est_oper;
+ ktime_t link_down_time;
+ int link_speed;
+};
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_down(struct net_device *ndev);
+
+#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
new file mode 100644
index 000000000000..c59a289e428c
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 AM65x Common Platform Time Sync
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "am65-cpts.h"
+
+struct am65_genf_regs {
+ u32 comp_lo; /* Comparison Low Value 0:31 */
+ u32 comp_hi; /* Comparison High Value 32:63 */
+ u32 control; /* control */
+ u32 length; /* Length */
+ u32 ppm_low; /* PPM Load Low Value 0:31 */
+ u32 ppm_hi; /* PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Nudge value */
+} __aligned(32) __packed;
+
+#define AM65_CPTS_GENF_MAX_NUM 9
+#define AM65_CPTS_ESTF_MAX_NUM 8
+
+struct am65_cpts_regs {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
+ u32 ts_comp_length; /* Time Stamp Comparison Length */
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_0; /* Event Time Stamp lo 0:31 */
+ u32 event_1; /* Event Type Fields */
+ u32 event_2; /* Event Type Fields domain */
+ u32 event_3; /* Event Time Stamp hi 32:63 */
+ u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
+ u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
+ u32 ts_add_val; /* Time Stamp Add value */
+ u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
+ u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Time Stamp Nudge value */
+ u32 reserv[33];
+ struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
+};
+
+/* CONTROL_REG */
+#define AM65_CPTS_CONTROL_EN BIT(0)
+#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
+#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
+#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
+#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
+#define AM65_CPTS_CONTROL_64MODE BIT(5)
+#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
+#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
+#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
+#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
+#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
+#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
+#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
+#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
+#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
+
+/* RFTCLK_SEL_REG */
+#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
+
+/* TS_PUSH_REG */
+#define AM65_CPTS_TS_PUSH BIT(0)
+
+/* TS_LOAD_EN_REG */
+#define AM65_CPTS_TS_LOAD_EN BIT(0)
+
+/* INTSTAT_RAW_REG */
+#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
+
+/* INTSTAT_MASKED_REG */
+#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
+
+/* INT_ENABLE_REG */
+#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
+
+/* TS_COMP_NUDGE_REG */
+#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
+
+/* EVENT_POP_REG */
+#define AM65_CPTS_EVENT_POP BIT(0)
+
+/* EVENT_1_REG */
+#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
+
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
+
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
+
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
+
+/* EVENT_2_REG */
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
+
+enum {
+ AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
+ AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ AM65_CPTS_EV_RX, /* Ethernet Receive Event */
+ AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
+ AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
+ AM65_CPTS_EV_HOST, /* Host Transmit Event */
+};
+
+struct am65_cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 event1;
+ u32 event2;
+ u64 timestamp;
+};
+
+#define AM65_CPTS_FIFO_DEPTH (16)
+#define AM65_CPTS_MAX_EVENTS (32)
+#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
+#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define AM65_CPTS_MIN_PPM 0x400
+
+struct am65_cpts {
+ struct device *dev;
+ struct am65_cpts_regs __iomem *reg;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ int phc_index;
+ struct clk_hw *clk_mux_hw;
+ struct device_node *clk_mux_np;
+ struct clk *refclk;
+ u32 refclk_freq;
+ struct list_head events;
+ struct list_head pool;
+ struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
+ spinlock_t lock; /* protects events lists */
+ u32 ext_ts_inputs;
+ u32 genf_num;
+ u32 ts_add_val;
+ int irq;
+ struct mutex ptp_clk_lock; /* PHC access sync */
+ u64 timestamp;
+ u32 genf_enable;
+ u32 hw_ts_enable;
+ struct sk_buff_head txq;
+};
+
+struct am65_cpts_skb_cb_data {
+ unsigned long tmo;
+ u32 skb_mtype_seqid;
+};
+
+#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
+#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
+
+static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
+{
+ u32 val;
+
+ val = upper_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_hi);
+ val = lower_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_lo);
+
+ am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
+}
+
+static void am65_cpts_set_add_val(struct am65_cpts *cpts)
+{
+ /* select coefficient according to the rate */
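+ /* e.g. a 250 MHz refclk has a 4 ns period (NSEC_PER_SEC /
+ * refclk_freq), so ts_add_val = 3
+ */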
+ cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
+
+ am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
+}
+
+static void am65_cpts_disable(struct am65_cpts *cpts)
+{
+ am65_cpts_write32(cpts, 0, control);
+ am65_cpts_write32(cpts, 0, int_enable);
+}
+
+static int am65_cpts_event_get_port(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
+ AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
+}
+
+static int am65_cpts_event_get_type(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
+}
+
+static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
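+/* Returns false when an event was popped from the FIFO, true when the
+ * FIFO is empty.
+ */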
+static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ u32 r = am65_cpts_read32(cpts, intstat_raw);
+
+ if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
+ event->timestamp = am65_cpts_read32(cpts, event_0);
+ event->event1 = am65_cpts_read32(cpts, event_1);
+ event->event2 = am65_cpts_read32(cpts, event_2);
+ event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
+ am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
+ return false;
+ }
+ return true;
+}
+
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ struct ptp_clock_event pevent;
+ struct am65_cpts_event *event;
+ bool schedule = false;
+ int i, type, ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
+ event = list_first_entry_or_null(&cpts->pool,
+ struct am65_cpts_event, list);
+
+ if (!event) {
+ if (am65_cpts_cpts_purge_events(cpts)) {
+ dev_err(cpts->dev, "cpts: event pool empty\n");
+ ret = -1;
+ goto out;
+ }
+ continue;
+ }
+
+ if (am65_cpts_fifo_pop_event(cpts, event))
+ break;
+
+ type = am65_cpts_event_get_type(event);
+ switch (type) {
+ case AM65_CPTS_EV_PUSH:
+ cpts->timestamp = event->timestamp;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
+ cpts->timestamp);
+ break;
+ case AM65_CPTS_EV_RX:
+ case AM65_CPTS_EV_TX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ schedule = true;
+ break;
+ case AM65_CPTS_EV_HW:
+ pevent.index = am65_cpts_event_get_port(event) - 1;
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ pevent.index, event->timestamp);
+
+ ptp_clock_event(cpts->ptp_clock, &pevent);
+ break;
+ case AM65_CPTS_EV_HOST:
+ break;
+ case AM65_CPTS_EV_ROLL:
+ case AM65_CPTS_EV_HALF:
+ case AM65_CPTS_EV_TS_COMP:
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
+ type,
+ event->event1, event->event2,
+ event->timestamp);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (schedule)
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+
+ return ret;
+}
+
+static u64 am65_cpts_gettime(struct am65_cpts *cpts,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+ u64 val = 0;
+
+ /* temporarily disable the cpts interrupt to avoid an unintended
+ * doubled read. An interrupt can be in-flight - that's OK.
+ */
+ am65_cpts_write32(cpts, 0, int_enable);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
+ am65_cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ am65_cpts_fifo_read(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ val = cpts->timestamp;
+
+ return val;
+}
+
+static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
+{
+ struct am65_cpts *cpts = dev_id;
+
+ if (am65_cpts_fifo_read(cpts))
+ dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ return IRQ_HANDLED;
+}
+
+/* PTP clock operations */
+static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ int neg_adj = 0;
+ u64 adj_period;
+ u32 val;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* base freq = 1GHz = 1 000 000 000
+ * ppb_norm = ppb * base_freq / clock_freq;
+ * ppm_norm = ppb_norm / 1000
+ * adj_period = 1 000 000 / ppm_norm
+ * adj_period = 1 000 000 000 / ppb_norm
+ * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
+ * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
+ * adj_period = clock_freq / ppb
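+ * e.g. refclk_freq = 500000000 and ppb = 100 -> adj_period = 5000000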
+ */
+ adj_period = div_u64(cpts->refclk_freq, ppb);
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ val = am65_cpts_read32(cpts, control);
+ if (neg_adj)
+ val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ else
+ val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+ am65_cpts_write32(cpts, val, control);
+
+ val = upper_32_bits(adj_period) & 0x3FF;
+ am65_cpts_write32(cpts, val, ts_ppm_hi);
+ val = lower_32_bits(adj_period);
+ am65_cpts_write32(cpts, val, ts_ppm_low);
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ ns += delta;
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, sts);
+ mutex_unlock(&cpts->ptp_clk_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ u64 ns;
+
+ /* reuse ptp_clk_lock as it serializes the ts push */
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
+
+static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ v = am65_cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ am65_cpts_write32(cpts, v, control);
+}
+
+static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
+{
+ if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_extts_enable_hw(cpts, index, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
+ __func__, index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ u64 cycles;
+ u32 val;
+
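+ /* convert the EST cycle time into refclk periods, e.g. 250000 ns
+ * at a 200 MHz refclk gives 50000 cycles
+ */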
+ cycles = cfg->ns_period * cpts->refclk_freq;
+ cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
+ if (cycles > U32_MAX)
+ return -EINVAL;
+
+ /* per the TRM, the length register must be zeroed first */
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ val = upper_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_hi);
+ val = lower_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
+
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
+
+static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ u64 ns_period, ns_start, cycles;
+ struct timespec64 ts;
+ u32 val;
+
+ if (on) {
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
+
+ ts.tv_sec = req->start.sec;
+ ts.tv_nsec = req->start.nsec;
+ ns_start = timespec64_to_ns(&ts);
+
+ val = upper_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
+ val = lower_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, genf[req->index].length);
+
+ cpts->genf_enable |= BIT(req->index);
+ } else {
+ am65_cpts_write32(cpts, 0, genf[req->index].length);
+
+ cpts->genf_enable &= ~BIT(req->index);
+ }
+}
+
+static int am65_cpts_perout_enable(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_perout_enable_hw(cpts, req, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
+ __func__, req->index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return am65_cpts_extts_enable(cpts, rq->extts.index, on);
+ case PTP_CLK_REQ_PEROUT:
+ return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
+
+static struct ptp_clock_info am65_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "CPTS timer",
+ .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjtime = am65_cpts_ptp_adjtime,
+ .gettimex64 = am65_cpts_ptp_gettimex,
+ .settime64 = am65_cpts_ptp_settime,
+ .enable = am65_cpts_ptp_enable,
+ .do_aux_work = am65_cpts_ts_work,
+};
+
+static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ /* no need to grab txq.lock here: the pending skbs were spliced
+ * onto the local txq_list above
+ */
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev,
+ "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* timeout any expired skbs over 100 ms */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void am65_cpts_find_ts(struct am65_cpts *cpts)
+{
+ struct am65_cpts_event *event;
+ struct list_head *this, *next;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (am65_cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ unsigned long flags;
+ long delay = -1;
+
+ am65_cpts_find_ts(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return delay;
+}
+
+/**
+ * am65_cpts_rx_enable - enable rx timestamping
+ * @cpts: cpts handle
+ * @en: enable
+ *
+ * This function enables rx packet timestamping. The CPTS can timestamp
+ * all rx packets.
+ */
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+ u32 val;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ val = am65_cpts_read32(cpts, control);
+ if (en)
+ val |= AM65_CPTS_CONTROL_TSTAMP_EN;
+ else
+ val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
+ am65_cpts_write32(cpts, val, control);
+ mutex_unlock(&cpts->ptp_clk_lock);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
+
+static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ __be16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ if (ptp_class & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (ptp_class & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return 0;
+ }
+
+ if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ return 0;
+
+ if (unlikely(ptp_class & PTP_CLASS_V1))
+ msgtype = data + offset + OFF_PTP_CONTROL;
+ else
+ msgtype = data + offset;
+
+ seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
+ *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ return 1;
+}
+
+/**
+ * am65_cpts_tx_timestamp - save tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function saves a tx packet for timestamping if the packet can be
+ * timestamped. The actual processing is done later, from the PTP
+ * auxiliary worker.
+ */
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+ /* add frame to queue for processing later.
+ * The periodic FIFO check will handle this.
+ */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
+
+/**
+ * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function should be called from .xmit(). It checks if the packet
+ * can be timestamped, fills the internal cpts data in skb->cb and marks
+ * the packet as SKBTX_IN_PROGRESS.
+ */
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return;
+
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
+
+int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return cpts->phc_index;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
+
+static void cpts_free_clk_mux(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ of_clk_del_provider(cpts->clk_mux_np);
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+ of_node_put(cpts->clk_mux_np);
+}
+
+static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
+ struct device_node *node)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ char *clk_mux_name;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
+ if (!cpts->clk_mux_np)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
+ cpts->clk_mux_np);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(cpts->dev), cpts->clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ reg = &cpts->reg->rftclk_sel;
+ /* dev must be NULL to avoid recursive incrementing
+ * of module refcnt
+ */
+ cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
+ parent_names, num_parents,
+ 0, reg, 0, 5, 0, NULL);
+ if (IS_ERR(cpts->clk_mux_hw)) {
+ ret = PTR_ERR(cpts->clk_mux_hw);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
+ cpts->clk_mux_hw);
+ if (ret)
+ goto clk_hw_register;
+
+ ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
+ if (ret)
+ dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
+
+ return ret;
+
+clk_hw_register:
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+mux_fail:
+ of_node_put(cpts->clk_mux_np);
+ return ret;
+}
+
+static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+{
+ u32 prop[2];
+
+ if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
+ cpts->ext_ts_inputs = prop[0];
+
+ if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
+ cpts->genf_num = prop[0];
+
+ return cpts_of_mux_clk_setup(cpts, node);
+}
+
+static void am65_cpts_release(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ ptp_clock_unregister(cpts->ptp_clock);
+ am65_cpts_disable(cpts);
+ clk_disable_unprepare(cpts->refclk);
+}
+
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct am65_cpts *cpts;
+ int ret, i;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct am65_cpts_regs __iomem *)regs;
+
+ cpts->irq = of_irq_get_byname(node, "cpts");
+ if (cpts->irq <= 0) {
+ ret = cpts->irq ?: -ENXIO;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ number (err = %d)\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = am65_cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_init(&cpts->ptp_clk_lock);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ spin_lock_init(&cpts->lock);
+ skb_queue_head_init(&cpts->txq);
+
+ for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ ret = PTR_ERR(cpts->refclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(cpts->refclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ cpts->refclk_freq = clk_get_rate(cpts->refclk);
+
+ am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
+ cpts->ptp_info = am65_ptp_info;
+
+ if (cpts->ext_ts_inputs)
+ cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
+ if (cpts->genf_num)
+ cpts->ptp_info.n_per_out = cpts->genf_num;
+
+ am65_cpts_set_add_val(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ control);
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ /* set time to the current system time */
+ am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
+
+ cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
+ if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ dev_err(dev, "Failed to register ptp clk %ld\n",
+ PTR_ERR(cpts->ptp_clock));
+ if (!cpts->ptp_clock)
+ ret = -ENODEV;
+ goto refclk_disable;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+
+ ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+ if (ret) {
+ dev_err(dev, "failed to add ptpclk reset action %d", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ am65_cpts_interrupt,
+ IRQF_ONESHOT, dev_name(dev), cpts);
+ if (ret < 0) {
+ dev_err(cpts->dev, "error attaching irq %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ am65_cpts_read32(cpts, idver),
+ cpts->refclk_freq, cpts->ts_add_val);
+
+ return cpts;
+
+refclk_disable:
+ clk_disable_unprepare(cpts->refclk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_create);
+
+static int am65_cpts_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct am65_cpts *cpts;
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cpts = am65_cpts_create(dev, base, node);
+ return PTR_ERR_OR_ZERO(cpts);
+}
+
+static const struct of_device_id am65_cpts_of_match[] = {
+ { .compatible = "ti,am65-cpts", },
+ { .compatible = "ti,j721e-cpts", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
+
+static struct platform_driver am65_cpts_driver = {
+ .probe = am65_cpts_probe,
+ .driver = {
+ .name = "am65-cpts",
+ .of_match_table = am65_cpts_of_match,
+ },
+};
+module_platform_driver(am65_cpts_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");
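A minimal sketch of the devm cleanup-action pattern am65_cpts_release() relies on above: devm_add_action_or_reset() runs the callback at driver detach, or immediately if registration fails, so the error paths after it need no manual unwinding. The my_* names are hypothetical stand-ins.

	#include <linux/device.h>

	struct my_ctx {
		struct device *dev;
	};

	static void my_release(void *data)
	{
		struct my_ctx *ctx = data;

		/* undo everything set up before this action was registered */
		dev_dbg(ctx->dev, "releasing\n");
	}

	static int my_setup(struct my_ctx *ctx)
	{
		/* on success: my_release() runs automatically at detach;
		 * on failure: my_release() has already been called here
		 */
		return devm_add_action_or_reset(ctx->dev, my_release, ctx);
	}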
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
new file mode 100644
index 000000000000..cf9fbc28fd03
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* TI K3 AM65 CPTS driver interface
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPTS_H_
+#define K3_CPTS_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct am65_cpts;
+
+struct am65_cpts_estf_cfg {
+ u64 ns_period;
+ u64 ns_start;
+};
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg);
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+#else
+static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ void __iomem *regs,
+ struct device_node *node)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return -1;
+}
+
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+}
+
+static inline u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ return 0;
+}
+
+static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ return 0;
+}
+
+static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+}
+#endif
+
+#endif /* K3_CPTS_H_ */
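A hypothetical consumer of this interface (the struct layout and function name below are illustrative, not from this patch). Because the !CONFIG_TI_K3_AM65_CPTS stubs above return ERR_PTR(-EOPNOTSUPP) or compile to no-ops, the same call sites build either way.

	#include <linux/err.h>
	#include "am65-cpts.h"

	struct my_mac {
		struct device *dev;
		void __iomem *cpts_regs;
		struct am65_cpts *cpts;
	};

	static int my_mac_cpts_init(struct my_mac *mac, struct device_node *node)
	{
		mac->cpts = am65_cpts_create(mac->dev, mac->cpts_regs, node);
		if (IS_ERR(mac->cpts))
			return PTR_ERR(mac->cpts);

		/* hardware-timestamp received PTP event frames */
		am65_cpts_rx_enable(mac->cpts, true);
		return 0;
	}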
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index a530afe3ce12..c20715107075 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -532,7 +532,7 @@ fatal_error:
}
-static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int queue;
unsigned int len;
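The one-line cpmac change above aligns the driver with the ndo_start_xmit prototype, which returns netdev_tx_t rather than int. A minimal sketch of the contract; my_hw_queue_full() is a hypothetical helper.

	#include <linux/netdevice.h>

	static bool my_hw_queue_full(struct net_device *dev);	/* hypothetical */

	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (my_hw_queue_full(dev))
			return NETDEV_TX_BUSY;	/* core retries the same skb */

		/* ... hand skb to hardware, free it on TX completion ... */
		return NETDEV_TX_OK;
	}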
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ffeb8633e530..9b17bbbe102f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -406,6 +406,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
@@ -1569,6 +1570,12 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ /* get misc irq */
+ irq = platform_get_irq(pdev, 3);
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
/*
* This may be required here for child devices.
*/
@@ -1703,6 +1710,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev_ret;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
&ss_res->start, cpsw->irqs_table[0], descs_pool_size);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 9209e613257d..1247d35d42ef 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -348,6 +348,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
xdp.data_hard_start = pa;
xdp.rxq = &priv->xdp_rxq[ch];
+ xdp.frame_sz = PAGE_SIZE;
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
if (ret != CPSW_XDP_PASS)
@@ -1228,7 +1229,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
data->ale_entries = CPSW_ALE_NUM_ENTRIES;
- data->dual_emac = 1;
+ data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1896,6 +1897,11 @@ static int cpsw_probe(struct platform_device *pdev)
return irq;
cpsw->irqs_table[1] = irq;
+ irq = platform_get_irq_byname(pdev, "misc");
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
platform_set_drvdata(pdev, cpsw);
/* This may be required here for child devices. */
pm_runtime_enable(dev);
@@ -1916,7 +1922,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
cpsw->rx_packet_max = rx_packet_max;
cpsw->descs_pool_size = descs_pool_size;
@@ -1975,6 +1981,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_unregister_netdev;
}
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+skip_cpts:
ret = cpsw_register_notifiers(cpsw);
if (ret)
goto clean_unregister_netdev;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index d0b6c418a870..a399f3659346 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -28,6 +28,8 @@
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
+#define CPTS_N_ETX_TS 4
+
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -112,6 +114,18 @@ irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->misc_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
+ cpts_misc_interrupt(cpsw->cpts);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+ return IRQ_HANDLED;
+}
+
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
@@ -522,7 +536,8 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
if (!cpts_node)
cpts_node = cpsw->dev->of_node;
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node);
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
+ CPTS_N_ETX_TS);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
@@ -1340,7 +1355,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
ret = CPSW_XDP_PASS;
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
+ xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto drop;
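cpsw_misc_interrupt() above follows a mask / EOI / handle / unmask sequence so the CPTS event-pending source stays quiet while the FIFO is drained. The 0x10 written back is bit 4 of misc_en; the define name in this annotated sketch is an editorial assumption, only the value comes from the patch.

	#define MY_MISC_EN_CPTS_EVNT_PEND	BIT(4)	/* == 0x10, assumed name */

	static irqreturn_t my_misc_handler(int irq, void *dev_id)
	{
		struct cpsw_common *cpsw = dev_id;

		writel(0, &cpsw->wr_regs->misc_en);		/* mask the source */
		cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);	/* ack the cpdma channel */
		cpts_misc_interrupt(cpsw->cpts);		/* drain the CPTS event FIFO */
		writel(MY_MISC_EN_CPTS_EVNT_PEND, &cpsw->wr_regs->misc_en); /* unmask */

		return IRQ_HANDLED;
	}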
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index b8d7b924ee3d..bf4e179b4ca4 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -350,6 +350,7 @@ struct cpsw_common {
bool rx_irq_disabled;
bool tx_irq_disabled;
u32 irqs_table[IRQ_NUM];
+ int misc_irq;
struct cpts *cpts;
struct devlink *devlink;
int rx_ch_num, tx_ch_num;
@@ -442,6 +443,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
struct page *page, int port);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 729ce09dded9..7c55d395de2c 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -21,16 +21,21 @@
#include "cpts.h"
#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define CPTS_SKB_RX_TX_TMO 100 /* ms */
+#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
struct cpts_skb_cb_data {
+ u32 skb_mtype_seqid;
unsigned long tmo;
};
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype);
+static int cpts_event_port(struct cpts_event *event)
+{
+ return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
+}
static int event_expired(struct cpts_event *event)
{
@@ -71,7 +76,7 @@ static int cpts_purge_events(struct cpts *cpts)
}
if (removed)
- pr_debug("cpts: event pool cleaned up %d\n", removed);
+ dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
return removed ? 0 : -1;
}
@@ -94,132 +99,126 @@ static void cpts_purge_txq(struct cpts *cpts)
dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}
-static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
-{
- struct sk_buff *skb, *tmp;
- u16 seqid;
- u8 mtype;
- bool found = false;
-
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
-
- /* no need to grab txq.lock as access is always done under cpts->lock */
- skb_queue_walk_safe(&cpts->txq, skb, tmp) {
- struct skb_shared_hwtstamps ssh;
- unsigned int class = ptp_classify_raw(skb);
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
-
- if (cpts_match(skb, class, seqid, mtype)) {
- u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
-
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
- found = true;
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
- break;
- }
-
- if (time_after(jiffies, skb_cb->tmo)) {
- /* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
- __skb_unlink(skb, &cpts->txq);
- dev_consume_skb_any(skb);
- }
- }
-
- return found;
-}
-
/*
* Returns zero if matching event type was found.
*/
static int cpts_fifo_read(struct cpts *cpts, int match)
{
+ struct ptp_clock_event pevent;
+ bool need_schedule = false;
+ struct cpts_event *event;
+ unsigned long flags;
int i, type = -1;
u32 hi, lo;
- struct cpts_event *event;
+
+ spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
if (cpts_fifo_pop(cpts, &hi, &lo))
break;
if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
- pr_err("cpts: event pool empty\n");
- return -1;
+ dev_warn(cpts->dev, "cpts: event pool empty\n");
+ break;
}
event = list_first_entry(&cpts->pool, struct cpts_event, list);
- event->tmo = jiffies + 2;
event->high = hi;
event->low = lo;
+ event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
type = event_type(event);
+
+ dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
+ type, event->high, event->low);
switch (type) {
- case CPTS_EV_TX:
- if (cpts_match_tx_ts(cpts, event)) {
- /* if the new event matches an existing skb,
- * then don't queue it
- */
- break;
- }
- /* fall through */
case CPTS_EV_PUSH:
+ WRITE_ONCE(cpts->cur_timestamp, lo);
+ timecounter_read(&cpts->tc);
+ if (cpts->mult_new) {
+ cpts->cc.mult = cpts->mult_new;
+ cpts->mult_new = 0;
+ }
+ if (!cpts->irq_poll)
+ complete(&cpts->ts_push_complete);
+ break;
+ case CPTS_EV_TX:
case CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);
+
list_del_init(&event->list);
list_add_tail(&event->list, &cpts->events);
+ need_schedule = true;
break;
case CPTS_EV_ROLL:
case CPTS_EV_HALF:
+ break;
case CPTS_EV_HW:
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ pevent.index = cpts_event_port(event) - 1;
+ ptp_clock_event(cpts->clock, &pevent);
break;
default:
- pr_err("cpts: unknown event type\n");
+ dev_err(cpts->dev, "cpts: unknown event type\n");
break;
}
if (type == match)
break;
}
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (!cpts->irq_poll && need_schedule)
+ ptp_schedule_worker(cpts->clock, 0);
+
return type == match ? 0 : -1;
}
+void cpts_misc_interrupt(struct cpts *cpts)
+{
+ cpts_fifo_read(cpts, -1);
+}
+EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
+
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
- u64 val = 0;
- struct cpts_event *event;
- struct list_head *this, *next;
struct cpts *cpts = container_of(cc, struct cpts, cc);
+ return READ_ONCE(cpts->cur_timestamp);
+}
+
+static void cpts_update_cur_time(struct cpts *cpts, int match,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+
+ reinit_completion(&cpts->ts_push_complete);
+
+ /* use spin_lock_irqsave() and keep this section short so the TS_PUSH
+ * write lands between the pre/post system timestamp snapshots
+ */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
cpts_write32(cpts, TS_PUSH, ts_push);
- if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
- pr_err("cpts: unable to obtain a time stamp\n");
+ cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct cpts_event, list);
- if (event_type(event) == CPTS_EV_PUSH) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- val = event->low;
- break;
- }
- }
+ if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
+ dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
- return val;
+ if (!cpts->irq_poll &&
+ !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
+ dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}
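In interrupt mode (irq_poll == false) the sequence above is a trigger-and-wait handshake: the requester arms the completion, pokes TS_PUSH, and sleeps; cpts_fifo_read() signals ts_push_complete when it pops the CPTS_EV_PUSH event. A condensed sketch, with my_ts_push_wait() as an editorial name:

	static int my_ts_push_wait(struct cpts *cpts)
	{
		reinit_completion(&cpts->ts_push_complete);	/* arm before triggering */
		cpts_write32(cpts, TS_PUSH, ts_push);		/* hardware latches the counter */

		/* completed from cpts_fifo_read() on CPTS_EV_PUSH */
		if (!wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
			return -ETIMEDOUT;
		return 0;
	}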
/* PTP clock operations */
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- u64 adj;
- u32 diff, mult;
- int neg_adj = 0;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
if (ppb < 0) {
neg_adj = 1;
@@ -230,38 +229,40 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
adj *= ppb;
diff = div_u64(adj, 1000000000ULL);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
- timecounter_read(&cpts->tc);
+ cpts->mult_new = neg_adj ? mult - diff : mult + diff;
- cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
-
- spin_unlock_irqrestore(&cpts->lock, flags);
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_adjtime(&cpts->tc, delta);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
-static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);
- spin_lock_irqsave(&cpts->lock, flags);
ns = timecounter_read(&cpts->tc);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -271,15 +272,38 @@ static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
static int cpts_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- u64 ns;
- unsigned long flags;
struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
ns = timespec64_to_ns(ts);
- spin_lock_irqsave(&cpts->lock, flags);
+ mutex_lock(&cpts->ptp_clk_mutex);
timecounter_init(&cpts->tc, &cpts->cc, ns);
- spin_unlock_irqrestore(&cpts->lock, flags);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ v = cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(8 + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(8 + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ cpts_write32(cpts, v, control);
+
+ mutex_unlock(&cpts->ptp_clk_mutex);
return 0;
}
@@ -287,28 +311,120 @@ static int cpts_ptp_settime(struct ptp_clock_info *ptp,
static int cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return cpts_extts_enable(cpts, rq->extts.index, on);
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(event->timestamp);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+ /* drop skbs whose CPTS_SKB_RX_TX_TMO wait has expired */
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
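The mtype_seqid value built above collapses three fields of the event high word into one u32, so matching an event against an skb becomes a single compare instead of the old per-field cpts_match(). An equivalent construction from the individual fields; my_make_key() is an editorial helper, the shift/mask macros are the existing cpts.h definitions:

	static u32 my_make_key(u8 mtype, u16 seqid, u32 ev_type)
	{
		return ((mtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT) |
		       ((seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT) |
		       ((ev_type & EVENT_TYPE_MASK) << EVENT_TYPE_SHIFT);
	}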
+static void cpts_process_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
struct cpts *cpts = container_of(ptp, struct cpts, info);
unsigned long delay = cpts->ov_check_period;
- struct timespec64 ts;
unsigned long flags;
+ u64 ns;
- spin_lock_irqsave(&cpts->lock, flags);
- ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, -1, NULL);
+ ns = timecounter_read(&cpts->tc);
+
+ cpts_process_events(cpts);
+ spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq)) {
cpts_purge_txq(cpts);
if (!skb_queue_empty(&cpts->txq))
delay = CPTS_SKB_TX_WORK_TIMEOUT;
}
- spin_unlock_irqrestore(&cpts->lock, flags);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
- pr_debug("cpts overflow check at %lld.%09ld\n",
- (long long)ts.tv_sec, ts.tv_nsec);
+ dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
+ mutex_unlock(&cpts->ptp_clk_mutex);
return (long)delay;
}
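cpts_overflow_check() is the .do_aux_work callback; its return value is the delay in jiffies before the PTP core runs it again (a negative value stops rescheduling), and ptp_schedule_worker(clock, 0), used in the RX/TX paths below, can pull the next run forward. A minimal sketch of that contract:

	static long my_aux_work(struct ptp_clock_info *ptp)
	{
		/* ... periodic housekeeping: refresh timecounter, purge queues ... */

		return msecs_to_jiffies(500);	/* re-run in 500 ms */
	}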
@@ -321,18 +437,21 @@ static const struct ptp_clock_info cpts_info = {
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
- .gettime64 = cpts_ptp_gettime,
+ .gettimex64 = cpts_ptp_gettimeex,
.settime64 = cpts_ptp_settime,
.enable = cpts_ptp_enable,
.do_aux_work = cpts_overflow_check,
};
-static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
- u16 ts_seqid, u8 ts_msgtype)
+static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
- u16 *seqid;
- unsigned int offset = 0;
+ unsigned int ptp_class = ptp_classify_raw(skb);
u8 *msgtype, *data = skb->data;
+ unsigned int offset = 0;
+ u16 *seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
if (ptp_class & PTP_CLASS_VLAN)
offset += VLAN_HLEN;
@@ -360,25 +479,23 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
msgtype = data + offset;
seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
- return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+ return 1;
}
-static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
+ int ev_type, u32 skb_mtype_seqid)
{
- u64 ns = 0;
- struct cpts_event *event;
struct list_head *this, *next;
- unsigned int class = ptp_classify_raw(skb);
+ struct cpts_event *event;
unsigned long flags;
- u16 seqid;
- u8 mtype;
-
- if (class == PTP_CLASS_NONE)
- return 0;
+ u32 mtype_seqid;
+ u64 ns = 0;
- spin_lock_irqsave(&cpts->lock, flags);
cpts_fifo_read(cpts, -1);
+ spin_lock_irqsave(&cpts->lock, flags);
list_for_each_safe(this, next, &cpts->events) {
event = list_entry(this, struct cpts_event, list);
if (event_expired(event)) {
@@ -386,29 +503,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
list_add(&event->list, &cpts->pool);
continue;
}
- mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
- seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
- if (ev_type == event_type(event) &&
- cpts_match(skb, class, seqid, mtype)) {
- ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
list_del_init(&event->list);
list_add(&event->list, &cpts->pool);
break;
}
}
-
- if (ev_type == CPTS_EV_TX && !ns) {
- struct cpts_skb_cb_data *skb_cb =
- (struct cpts_skb_cb_data *)skb->cb;
- /* Not found, add frame to queue for processing later.
- * The periodic FIFO check will handle this.
- */
- skb_get(skb);
- /* get the timestamp for timeouts */
- skb_cb->tmo = jiffies + msecs_to_jiffies(100);
- __skb_queue_tail(&cpts->txq, skb);
- ptp_schedule_worker(cpts->clock, 0);
- }
spin_unlock_irqrestore(&cpts->lock, flags);
return ns;
@@ -416,10 +523,21 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
if (!ns)
return;
ssh = skb_hwtstamps(skb);
@@ -430,17 +548,27 @@ EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
- u64 ns;
- struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ int ret;
if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
return;
- ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
- if (!ns)
+
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
return;
- memset(&ssh, 0, sizeof(ssh));
- ssh.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &ssh);
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ /* Always defer TX TS processing to PTP worker */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
@@ -632,7 +760,7 @@ of_error:
}
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
struct cpts *cpts;
int ret;
@@ -643,7 +771,10 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->dev = dev;
cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ cpts->irq_poll = true;
spin_lock_init(&cpts->lock);
+ mutex_init(&cpts->ptp_clk_mutex);
+ init_completion(&cpts->ts_push_complete);
ret = cpts_of_parse(cpts, node);
if (ret)
@@ -668,6 +799,9 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ if (n_ext_ts)
+ cpts->info.n_ext_ts = n_ext_ts;
+
cpts_calc_mult_shift(cpts);
/* save cc.mult original value as it can be modified
* by cpts_ptp_adjfreq().
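After this rework the raw-counter read and the nanosecond conversion are decoupled: TS_PUSH latches the counter into the event FIFO, cpts_fifo_read() caches it in cur_timestamp, and the timecounter scales the cached cycles. The gettime path condenses to the following editorial summary, mirroring cpts_ptp_gettimeex() above:

	static u64 my_gettime_ns(struct cpts *cpts)
	{
		u64 ns;

		mutex_lock(&cpts->ptp_clk_mutex);
		/* latch + fetch the raw counter (TS_PUSH -> FIFO -> cur_timestamp) */
		cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
		/* cyclecounter read returns the cached value; timecounter scales it */
		ns = timecounter_read(&cpts->tc);
		mutex_unlock(&cpts->ptp_clk_mutex);

		return ns;
	}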
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index bb997c11ee15..07222f651d2e 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -94,6 +94,7 @@ struct cpts_event {
unsigned long tmo;
u32 high;
u32 low;
+ u64 timestamp;
};
struct cpts {
@@ -103,7 +104,7 @@ struct cpts {
int rx_enable;
struct ptp_clock_info info;
struct ptp_clock *clock;
- spinlock_t lock; /* protects time registers */
+ spinlock_t lock; /* protects fifo/events */
u32 cc_mult; /* for the nominal frequency */
struct cyclecounter cc;
struct timecounter tc;
@@ -114,6 +115,12 @@ struct cpts {
struct cpts_event pool_data[CPTS_MAX_EVENTS];
unsigned long ov_check_period;
struct sk_buff_head txq;
+ u64 cur_timestamp;
+ u32 mult_new;
+ struct mutex ptp_clk_mutex; /* sync PTP interface and worker */
+ bool irq_poll;
+ struct completion ts_push_complete;
+ u32 hw_ts_enable;
};
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
@@ -121,8 +128,9 @@ void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
int cpts_register(struct cpts *cpts);
void cpts_unregister(struct cpts *cpts);
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node);
+ struct device_node *node, u32 n_ext_ts);
void cpts_release(struct cpts *cpts);
+void cpts_misc_interrupt(struct cpts *cpts);
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
@@ -134,6 +142,11 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
return true;
}
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+ cpts->irq_poll = en;
+}
+
#else
struct cpts;
@@ -146,7 +159,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
static inline
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
- struct device_node *node)
+ struct device_node *node, u32 n_ext_ts)
{
return NULL;
}
@@ -169,6 +182,14 @@ static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
}
+
+static inline void cpts_misc_interrupt(struct cpts *cpts)
+{
+}
+
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+}
#endif
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 38b7f6d35759..702fdc393da0 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
data->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!data->regs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index ad7cfc1316ce..38cc12f9f133 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -64,8 +64,8 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
return ERR_PTR(-ENOMEM);
pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
- if (IS_ERR(pool->gen_pool)) {
- ret = PTR_ERR(pool->gen_pool);
+ if (!pool->gen_pool) {
+ ret = -ENOMEM;
dev_err(pool->dev, "pool create failed %d\n", ret);
kfree_const(pool_name);
goto gen_pool_create_fail;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index fdbae734acce..28093923a7fb 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3716,7 +3716,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
if (!cpts_node)
cpts_node = of_node_get(node);
- gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg, cpts_node);
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
+ cpts_node, 0);
of_node_put(cpts_node);
if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
ret = PTR_ERR(gbe_dev->cpts);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index ad465202980a..857709828058 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -70,7 +70,7 @@ MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
/* Turn on debugging.
- * See Documentation/networking/device_drivers/ti/tlan.txt for details
+ * See Documentation/networking/device_drivers/ti/tlan.rst for details
*/
static int debug;
module_param(debug, int, 0);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 070dd6fa9401..310e6839c6e5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1150,7 +1150,7 @@ static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
* gelic_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
void gelic_net_poll_controller(struct net_device *netdev)
{
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6576271642c1..3902b3aeb0c2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1615,7 +1615,7 @@ spider_net_interrupt(int irq, void *ptr)
* spider_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
- * see Documentation/networking/netconsole.txt
+ * see Documentation/networking/netconsole.rst
*/
static void
spider_net_poll_controller(struct net_device *netdev)
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index a962097b58c6..6cff5f7d57c4 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
+ depends on PCI || ARCH_VT8500 || COMPILE_TEST
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3e313e71ae36..929244064abd 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1410,9 +1410,9 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->regs)) {
+ if (!lp->regs) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
- return PTR_ERR(lp->regs);
+ return -ENOMEM;
}
/* Select register access functions with the specified
@@ -1505,10 +1505,10 @@ static int temac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(lp->sdma_regs)) {
+ if (!lp->sdma_regs) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return PTR_ERR(lp->sdma_regs);
+ return -ENOMEM;
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
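The davinci_mdio, k3-cppi-desc-pool and ll_temac hunks above all fix the same bug class: a NULL-on-failure API (platform_get_resource(), gen_pool_create(), devm_ioremap()) was being tested with IS_ERR(), which never fires for NULL, so failures were silently ignored. A sketch of the two return conventions side by side; my_map() is an editorial helper:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/ioport.h>

	static void __iomem *my_map(struct device *dev, struct resource *res)
	{
		void __iomem *base;

		if (!res)		/* platform_get_resource(): NULL on failure */
			return IOMEM_ERR_PTR(-EINVAL);

		base = devm_ioremap(dev, res->start, resource_size(res));
		if (!base)		/* devm_ioremap(): NULL on failure */
			return IOMEM_ERR_PTR(-ENOMEM);

		/* callers may use IS_ERR(), matching the devm_ioremap_resource() style */
		return base;
	}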