Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 8
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 6
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 107
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 33
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 10
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 10
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 12
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 11
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1457
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 187
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 197
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 195
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 109
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 181
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/smt.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 7
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 9
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 21
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 39
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 54
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 86
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 97
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 40
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 19
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 97
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 101
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 78
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 45
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 46
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 363
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 60
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 63
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 414
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 151
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1052
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 71
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 47
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 100
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 23
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 24
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 15
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 9
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 33
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 110
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 155
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 101
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 61
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 6
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 146
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 112
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 200
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 565
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 655
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 56
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 161
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 543
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 39
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 14
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9
-rw-r--r--  drivers/net/ethernet/jme.c | 15
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 19
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 75
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 45
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 3
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c | 71
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 999
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 123
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_sgmii.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 102
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 114
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 205
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 162
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 404
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 256
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 187
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 316
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1068
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 222
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 446
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c | 371
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h | 104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 240
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 67
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 267
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 7
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 7
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 401
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 49
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 145
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.h | 41
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_regs.c | 11
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 19
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 160
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 19
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 197
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 200
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 3
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/qlogic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 82
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 32
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 34
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 122
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 61
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 35
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 117
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/Makefile | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2353
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2024
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 735
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5027
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1285
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 12
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 12
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c | 13
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c | 3
-rw-r--r--  drivers/net/ethernet/realtek/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 1087
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/falcon/falcon_boards.c | 18
-rw-r--r--  drivers/net/ethernet/sfc/falcon/rx.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 1038
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 3
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 68
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 3
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 132
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 787
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 47
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 192
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 105
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 435
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 173
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 516
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 8
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c | 7
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 6
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 4
371 files changed, 18022 insertions, 18051 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 147051404194..8785c2ff3825 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev)
static int vortex_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (!ndev || !netif_running(ndev))
return 0;
@@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev)
static int vortex_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
int err;
if (!ndev || !netif_running(ndev))
@@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 010a2f48aea5..2a9f8643629c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size, true);
+ skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
}
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index edbb4b3604c7..174344c450af 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
- struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
frag++;
}
} else {
- desc[frag].len_vlan = frags[i - 1].size;
+ desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
&frags[i - 1],
0,
- frags[i - 1].size,
+ desc[frag].len_vlan,
DMA_TO_DEVICE);
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 650d1bae5f56..1793950f0582 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 87ff5d6d1b22..c6c2a54c1121 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -697,16 +697,14 @@ static void ni65_free_buffer(struct priv *p)
for(i=0;i<TMDNUM;i++) {
kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i])
- dev_kfree_skb(p->tmd_skb[i]);
+ dev_kfree_skb(p->tmd_skb[i]);
#endif
}
for(i=0;i<RMDNUM;i++)
{
#ifdef RCV_VIA_SKB
- if(p->recv_skb[i])
- dev_kfree_skb(p->recv_skb[i]);
+ dev_kfree_skb(p->recv_skb[i]);
#else
kfree(p->recvbounce[i]);
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b91143947ed2..b0a6c96b6ef4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -438,7 +438,6 @@ static const struct file_operations xi2c_reg_value_fops = {
void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
/* Set defaults */
@@ -451,88 +450,48 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
return;
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (!pdata->xgbe_debugfs) {
- netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
- kfree(buf);
- return;
- }
- pfile = debugfs_create_file("xgmac_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
- pfile = debugfs_create_file("xgmac_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xgmac_reg_value_fops);
- pfile = debugfs_create_file("xpcs_mmd", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_mmd_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
- pfile = debugfs_create_file("xpcs_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
- pfile = debugfs_create_file("xpcs_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xpcs_reg_value_fops);
if (pdata->xprop_regs) {
- pfile = debugfs_create_file("xprop_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xprop_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xprop_reg_addr_fops);
+
+ debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
}
if (pdata->xi2c_regs) {
- pfile = debugfs_create_file("xi2c_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xi2c_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xi2c_reg_addr_fops);
+
+ debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
}
if (pdata->vdata->an_cdr_workaround) {
- pfile = debugfs_create_bool("an_cdr_workaround", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_workaround);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
-
- pfile = debugfs_create_bool("an_cdr_track_early", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_track_early);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
+ debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+
+ debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
}
kfree(buf);
@@ -546,7 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
if (!pdata->xgbe_debugfs)
@@ -559,11 +517,8 @@ void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
goto out;
- pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent,
- pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_rename failed\n");
+ debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
+ pdata->xgbe_debugfs->d_parent, buf);
out:
kfree(buf);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 533094233659..230726d7b74f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t skb_dma;
unsigned int start_index, cur_index;
unsigned int offset, tso, vlan, datalen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3dd0cecddba8..98f8f2033154 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, struct sk_buff *skb,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index d0f3dfb88202..4ebd2410185a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -301,7 +301,6 @@ static int xgbe_platform_probe(struct platform_device *pdev)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
- struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
@@ -353,8 +352,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
/* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
@@ -363,8 +361,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
@@ -373,8 +370,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
@@ -383,8 +380,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
@@ -393,8 +390,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
@@ -467,10 +464,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
@@ -479,12 +474,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- dma_irqnum - 1);
+ if (ret < 0)
goto err_io;
- }
pdata->channel_irq[i] = ret;
}
@@ -496,10 +487,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->an_irq = ret;
/* Configure the netdev resource */
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 79048cc46703..02b4f3af02b5 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Unable to get irq\n");
+ if (ret < 0)
return ret;
- }
pdata->resources.irq = ret;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 61a465097cb8..5f657879134e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 10b1c053e70a..d8612131c55e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < 2 && i < nr_frags; i++)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(
+ &skb_shinfo(skb)->frags[i]);
/* HW requires header must reside in 3 buffer */
if (unlikely(hdr_len > len)) {
@@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev = pdata->pdev;
- struct device *dev = &pdev->dev;
int i, ret, max_irqs;
if (phy_interface_mode_is_rgmii(pdata->phy_mode))
@@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- dev_err(dev, "Unable to get ENET IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ return ret ? : -ENXIO;
}
pdata->irqs[i] = ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 6453fc2ebb1f..f482ced2cadd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
- acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
- "_RST", NULL, NULL);
- else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
"_INI", NULL, NULL);
+ }
#endif
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 133eb91c542e..304b5d43f236 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index c40daad515d5..a58185b1d8bf 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -815,8 +815,8 @@ static int reverse6[64] = {
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
- register unsigned int counter, cur = curval, next = nxtval;
- register int high_crc_set, low_data_set;
+ unsigned int counter, cur = curval, next = nxtval;
+ int high_crc_set, low_data_set;
/* Swap bytes */
next = ((next & 0x00FF) << 8) | (next >> 8);
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 6703960c7cf5..7548247455d7 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
return -ENOMEM;
}
- rx->buf = &tx->buf[BIT(tx->order)];
+ rx->buf = &tx->buf[tx_size];
rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
@@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e3538ba7d0e7..d4bbcdfd691a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->len = cpu_to_le16(maplen);
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
txq->write_idx = 0;
@@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
@@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev)
static int alx_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
struct alx_hw *hw = &alx->hw;
int err;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be7f9cebb675..2b239ecea05f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
@@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev)
static int atl1c_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 wufc = adapter->wol;
@@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev)
if (wufc)
if (atl1c_phy_to_ps_link(hw) != 0)
- dev_dbg(&pdev->dev, "phy power saving failed");
+ dev_dbg(dev, "phy power saving failed");
atl1c_power_saving(hw, wufc);
@@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7f14e010bfeb..4f7b65825c15 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i;
u16 seg_num;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b5c6dc914720..b498fd6a47d0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i, nseg;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
@@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
@@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev)
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
+ dev_printk(KERN_DEBUG, dev,
"error getting speed/duplex\n");
goto disable_wol;
}
@@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev)
static int atl1_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 3b3370a94a9c..37752d9514e7 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev)
ops = match->data;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "No IRQ\n");
+ if (irq <= 0)
return -EINVAL;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 291e4afd4a1a..620cd3fc1fbc 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+ struct resource *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
int i, ret;
@@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out;
@@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = {
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *p[3];
unsigned int i;
memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
for (i = 0; i < 3; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- p[i] = devm_ioremap_resource(&pdev->dev, res);
+ p[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(p[i]))
return PTR_ERR(p[i]);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9483553ce444..7df887e4024c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -708,8 +708,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
skb = bcm_sysport_rx_refill(priv, cb);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -2420,12 +2419,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
struct device_node *dn;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
u32 txq, rxq;
int ret;
dn = pdev->dev.of_node;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
of_id = of_match_node(bcm_sysport_of_match, dn);
if (!of_id || !of_id->data)
return -EINVAL;
@@ -2473,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_free_netdev;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6dc0dd91ad11..c46c1b1416f7 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
bgmac->irq = platform_get_irq(pdev, 0);
- if (bgmac->irq < 0) {
- dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ if (bgmac->irq < 0)
return bgmac->irq;
- }
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
if (!regs) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 4632dd5dbad1..148734b166f0 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
flags = 0;
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
index = (index + 1) % BGMAC_TX_RING_SLOTS;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dfdd14eadd57..fbc196b480b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev)
static int
bnx2_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev)) {
@@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device)
static int
bnx2_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8dce4069472b..402d9f50d92c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -116,6 +116,9 @@ enum board_idx {
BCM57508,
BCM57504,
BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -161,6 +164,9 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
+ [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
+ [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -242,6 +254,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -828,16 +842,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons;
struct rx_agg_cmp *agg;
@@ -845,8 +884,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
struct rx_bd *prod_bd;
struct page *page;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, start + i);
cons = agg->rx_agg_cmp_opaque;
__clear_bit(cons, rxr->rx_agg_bmap);
@@ -874,7 +915,6 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -888,7 +928,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
{
unsigned int payload = offset_and_len >> 16;
unsigned int len = offset_and_len & 0xffff;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct page *page = data;
u16 prod = rxr->rx_prod;
struct sk_buff *skb;
@@ -919,7 +959,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
frag = &skb_shinfo(skb)->frags[0];
skb_frag_size_sub(frag, payload);
- frag->page_offset += payload;
+ skb_frag_off_add(frag, payload);
skb->data_len -= payload;
skb->tail += payload;
@@ -957,15 +997,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 cp_cons,
- u32 agg_bufs)
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
@@ -973,8 +1017,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, i);
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1008,7 +1054,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
return NULL;
}
@@ -1021,7 +1067,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
skb->truesize += PAGE_SIZE;
prod = NEXT_RX_AGG(prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
return skb;
@@ -1081,9 +1126,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >>
- RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
}
if (agg_bufs) {
@@ -1094,6 +1140,14 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
static void bnxt_queue_sp_work(struct bnxt *bp)
{
if (BNXT_PF(bp))
@@ -1120,26 +1174,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
rxr->rx_next_cons = 0xffff;
}
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_P5_MASK;
+
+ if (test_bit(idx, map->agg_idx_bmap))
+ idx = find_first_zero_bit(map->agg_idx_bmap,
+ BNXT_AGG_IDX_BMAP_SIZE);
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
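The three helpers above remap the aggregation ID reported by 57500-series (P5) hardware onto a small driver-side slot, using a bitmap for allocation and a table for the TPA-end lookup. Below is a standalone sketch of the same idea, not part of the patch, with made-up sizes and no claim about the real MAX_TPA_P5_MASK layout:

#include <stdio.h>

#define SW_SLOTS  8        /* stand-in for bp->max_tpa */
#define HW_IDS    1024     /* stand-in for the hardware agg_id space */

static unsigned char sw_slot_used[SW_SLOTS];   /* like map->agg_idx_bmap */
static unsigned short hw_to_sw[HW_IDS];        /* like map->agg_id_tbl */

static unsigned short alloc_slot(unsigned short hw_id)
{
	unsigned short idx = hw_id % SW_SLOTS;  /* preferred slot, like agg_id & MAX_TPA_P5_MASK */

	if (sw_slot_used[idx]) {                /* collision: take the first free slot instead */
		for (idx = 0; idx < SW_SLOTS - 1; idx++)
			if (!sw_slot_used[idx])
				break;
		/* like the driver, this assumes a free slot always exists */
	}
	sw_slot_used[idx] = 1;
	hw_to_sw[hw_id] = idx;
	return idx;
}

int main(void)
{
	unsigned short slot = alloc_slot(777);  /* TPA start: map hardware ID to a slot */

	printf("hw agg_id 777 -> sw slot %u\n", slot);
	sw_slot_used[hw_to_sw[777]] = 0;        /* TPA end: look up and release the slot */
	return 0;
}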
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
- u8 agg_id = TPA_START_AGG_ID(tpa_start);
- u16 cons, prod;
- struct bnxt_tpa_info *tpa_info;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
struct rx_bd *prod_bd;
dma_addr_t mapping;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_START_AGG_ID_P5(tpa_start);
+ agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ } else {
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ }
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id];
- if (unlikely(cons != rxr->rx_next_cons)) {
- netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1184,6 +1272,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -1195,13 +1284,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
+#ifdef CONFIG_INET
+static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+#endif
+
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
int payload_off, int tcp_ts,
struct sk_buff *skb)
@@ -1259,28 +1372,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
}
if (inner_mac_off) { /* tunnel */
- struct udphdr *uh = NULL;
__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
ETH_HLEN - 2));
- if (proto == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
+ bnxt_gro_tunnel(skb, proto);
+ }
+#endif
+ return skb;
+}
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnxt_gro_tunnel(skb, proto);
}
#endif
return skb;
@@ -1327,28 +1451,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
return NULL;
}
- if (nw_off) { /* tunnel */
- struct udphdr *uh = NULL;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
- }
+ if (nw_off) /* tunnel */
+ bnxt_gro_tunnel(skb, skb->protocol);
#endif
return skb;
}
@@ -1371,9 +1475,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
+ else
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
if (likely(skb))
tcp_gro_complete(skb);
@@ -1401,14 +1506,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
- u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ u16 idx = 0, agg_id;
void *data;
+ bool gro;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
@@ -1418,26 +1523,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- tpa_info = &rxr->rx_tpa[agg_id];
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_END_AGG_ID_P5(tpa_end);
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNXT_AGG_EVENT;
+ bnxt_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ gro = !!(bp->flags & BNXT_FLAG_GRO);
+ } else {
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ idx = RING_CMP(*raw_cons);
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *event |= BNXT_AGG_EVENT;
+ idx = NEXT_CMP(idx);
+ }
+ gro = !!TPA_END_GRO(tpa_end);
+ }
data = tpa_info->data;
data_ptr = tpa_info->data_ptr;
prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
-
- if (agg_bufs) {
- if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
- return ERR_PTR(-EBUSY);
-
- *event |= BNXT_AGG_EVENT;
- cp_cons = NEXT_CMP(cp_cons);
- }
-
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1447,7 +1569,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
} else {
@@ -1456,7 +1578,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
@@ -1471,7 +1593,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1479,7 +1601,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1508,12 +1630,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
}
- if (TPA_END_GRO(tpa_end))
+ if (gro)
skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
+static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnxt_tpa_info *tpa_info;
+
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
@@ -1555,6 +1689,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
@@ -1563,8 +1704,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
- cmp_type = RX_CMP_TYPE(rxcmp);
-
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1623,7 +1762,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1646,7 +1786,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
rc = -ENOMEM;
goto next_rx;
}
@@ -1666,7 +1807,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1765,6 +1906,33 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->regs[reg_idx];
+ u32 reg_type, reg_off, val = 0;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_read_config_dword(bp->pdev, reg_off, &val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ reg_off = fw_health->mapped_regs[reg_idx];
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ val = readl(bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ val = readl(bp->bar1 + reg_off);
+ break;
+ }
+ if (reg_idx == BNXT_FW_RESET_INPROG_REG)
+ val &= fw_health->fw_reset_inprog_reg_mask;
+ return val;
+}
+
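bnxt_fw_health_readl() treats each value returned by ERROR_RECOVERY_QCFG as a descriptor: a register-space selector (PCI config, GRC, BAR0 or BAR1) plus an offset into that space. A minimal decoding sketch follows; the two-low-bit layout used here is only an assumption for illustration, the real masks are the BNXT_FW_HEALTH_REG_TYPE()/BNXT_FW_HEALTH_REG_OFF() macros in bnxt.h:

#include <stdio.h>

enum reg_space { SPACE_CFG, SPACE_GRC, SPACE_BAR0, SPACE_BAR1 };

static const char * const space_name[] = { "cfg", "grc", "bar0", "bar1" };

static void decode(unsigned int desc)
{
	unsigned int type = desc & 0x3;          /* assumed: selector in the two low bits */
	unsigned int off  = desc & ~0x3u;        /* assumed: remaining bits are the offset */

	printf("descriptor 0x%08x -> %s space, offset 0x%x\n",
	       desc, space_name[type], off);
}

int main(void)
{
	decode(0x00310001);   /* hypothetical health-status register in GRC space */
	decode(0x000004a8);   /* hypothetical register read via PCI config space */
	return 0;
}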
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1820,6 +1988,55 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ bp->fw_reset_timestamp = jiffies;
+ bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
+ if (!bp->fw_reset_min_dsecs)
+ bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
+ bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
+ if (!bp->fw_reset_max_dsecs)
+ bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+ if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else {
+ netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
+ bp->fw_reset_max_dsecs * 100);
+ }
+ set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ if (!fw_health)
+ goto async_event_process_exit;
+
+ fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ if (!fw_health->enabled)
+ break;
+
+ if (netif_msg_drv(bp))
+ netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+ fw_health->enabled, fw_health->master,
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_CNT_REG),
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_HEALTH_REG));
+ fw_health->tmr_multiplier =
+ DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ bp->current_interval * 10);
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2325,10 +2542,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_tpa_idx_map *map;
int j;
if (rxr->rx_tpa) {
- for (j = 0; j < MAX_TPA; j++) {
+ for (j = 0; j < bp->max_tpa; j++) {
struct bnxt_tpa_info *tpa_info =
&rxr->rx_tpa[j];
u8 *data = tpa_info->data;
@@ -2395,6 +2613,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
}
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
}
@@ -2483,6 +2704,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ kfree(rxr->rx_tpa[0].agg_arr);
+ rxr->rx_tpa[0].agg_arr = NULL;
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+ int i, j, total_aggs = 0;
+
+ bp->max_tpa = MAX_TPA;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (!bp->max_tpa_v2)
+ return 0;
+ bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ }
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct rx_agg_cmp *agg;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+ agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+ rxr->rx_tpa[0].agg_arr = agg;
+ if (!agg)
+ return -ENOMEM;
+ for (j = 1; j < bp->max_tpa; j++)
+ rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
@@ -2490,6 +2766,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (!bp->rx_ring)
return;
+ bnxt_free_tpa_info(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2503,9 +2780,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
-
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -2539,7 +2813,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc, agg_rings = 0, tpa_rings = 0;
+ int i, rc = 0, agg_rings = 0;
if (!bp->rx_ring)
return -ENOMEM;
@@ -2547,9 +2821,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
agg_rings = 1;
- if (bp->flags & BNXT_FLAG_TPA)
- tpa_rings = 1;
-
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2591,17 +2862,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
-
- if (tpa_rings) {
- rxr->rx_tpa = kcalloc(MAX_TPA,
- sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- }
}
}
- return 0;
+ if (bp->flags & BNXT_FLAG_TPA)
+ rc = bnxt_alloc_tpa_info(bp);
+ return rc;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
@@ -2953,7 +3218,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
dma_addr_t mapping;
- for (i = 0; i < MAX_TPA; i++) {
+ for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_data(bp, &mapping,
GFP_KERNEL);
if (!data)
@@ -3376,6 +3641,9 @@ static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_cmd_kong_resp_addr)
+ return 0;
+
bp->hwrm_cmd_kong_resp_addr =
dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&bp->hwrm_cmd_kong_resp_dma_addr,
@@ -3415,6 +3683,9 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_short_cmd_req_addr)
+ return 0;
+
bp->hwrm_short_cmd_req_addr =
dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
@@ -3468,7 +3739,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
if (!bp->bnapi)
return;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3487,7 +3758,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3869,6 +4140,32 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
+static int bnxt_hwrm_to_stderr(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
@@ -3886,6 +4183,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
+
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
!bp->hwrm_short_cmd_req_addr)
@@ -3950,6 +4250,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ if (!pci_is_enabled(bp->pdev))
+ return 0;
+
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
/* convert timeout to usec */
@@ -3981,9 +4284,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(req->req_type));
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ le16_to_cpu(req->req_type));
+ return -EBUSY;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
@@ -4007,11 +4311,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (i >= tmo_count) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len);
+ return -EBUSY;
}
/* Last byte of resp contains valid bit */
@@ -4025,11 +4330,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len, *valid);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len,
+ *valid);
+ return -EBUSY;
}
}
@@ -4043,7 +4350,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
- return rc;
+ return bnxt_hwrm_to_stderr(rc);
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -4092,9 +4399,14 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
- for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
- __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ continue;
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ }
if (bmap && bmap_size) {
for (i = 0; i < bmap_size; i++) {
if (test_bit(i, bmap))
@@ -4112,6 +4424,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ u32 flags;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -4121,7 +4434,11 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
- req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
+ FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
req.ver_upd_8b = DRV_VER_UPD;
@@ -4156,10 +4473,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
- else if (resp->flags &
- cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4414,6 +4729,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input req = {0};
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
@@ -4453,9 +4769,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- segs = ilog2(nsegs);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ segs = MAX_TPA_SEGS_P5;
+ max_aggs = bp->max_tpa;
+ } else {
+ segs = ilog2(nsegs);
+ }
req.max_agg_segs = cpu_to_le16(segs);
- req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ req.max_aggs = cpu_to_le16(max_aggs);
req.min_agg_len = cpu_to_le32(512);
}
@@ -4576,7 +4897,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -EIO;
+ return rc;
}
return 0;
}
@@ -4739,8 +5060,6 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
return rc;
@@ -4800,6 +5119,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
struct hwrm_vnic_qcaps_input req = {0};
int rc;
+ bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -4815,6 +5136,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bp->max_tpa_v2)
+ bp->hw_ring_stats_size =
+ sizeof(struct ctx_hw_stats_ext);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4874,8 +5199,6 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5194,6 +5517,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req.ring_type = ring_type;
req.ring_id = cpu_to_le16(ring->fw_ring_id);
@@ -5331,7 +5657,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
@@ -5495,7 +5821,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
@@ -5520,7 +5846,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, stats, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
rc = bnxt_hwrm_get_rings(bp);
return rc;
@@ -5701,9 +6027,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5731,9 +6055,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5995,8 +6317,6 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -6016,6 +6336,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -6057,6 +6378,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ } else {
+ bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
flags = le16_to_cpu(resp->flags);
@@ -6292,8 +6615,6 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -6555,10 +6876,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -EIO;
+ if (rc)
goto hwrm_func_resc_qcaps_exit;
- }
hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
if (!all)
@@ -6626,6 +6945,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
@@ -6657,6 +6978,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
@@ -6726,6 +7048,103 @@ hwrm_cfa_adv_qcaps_exit:
return rc;
}
+static int bnxt_map_fw_health_regs(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_base = 0xffffffff;
+ int i;
+
+ /* Only pre-map the monitoring GRC registers using window 3 */
+ for (i = 0; i < 4; i++) {
+ u32 reg = fw_health->regs[i];
+
+ if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
+ continue;
+ if (reg_base == 0xffffffff)
+ reg_base = reg & BNXT_GRC_BASE_MASK;
+ if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
+ (reg & BNXT_GRC_OFFSET_MASK);
+ }
+ if (reg_base == 0xffffffff)
+ return 0;
+
+ writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+ return 0;
+}
+
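bnxt_map_fw_health_regs() pre-programs one GRC window so the health registers can later be read with plain readl() through BAR0: the page-aligned part of the GRC address goes into the window-map register once, and only the in-page offset is kept per register in fw_health->mapped_regs[]. A standalone sketch of that split, assuming a 4 KB window and made-up addresses purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int grc_reg = 0x0031001c;        /* hypothetical GRC register address */
	unsigned int win     = 0x2000;            /* hypothetical BAR0 offset of the mapped window */
	unsigned int base    = grc_reg & ~0xfffu; /* written once to the window-map register */
	unsigned int off     = grc_reg &  0xfffu; /* kept per register for later reads */

	printf("program window to 0x%08x, then readl(bar0 + 0x%x)\n",
	       base, win + off);
	return 0;
}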
+static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+ struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_error_recovery_qcfg_input req = {0};
+ int rc, i;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto err_recovery_out;
+ if (!fw_health) {
+ fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
+ bp->fw_health = fw_health;
+ if (!fw_health) {
+ rc = -ENOMEM;
+ goto err_recovery_out;
+ }
+ }
+ fw_health->flags = le32_to_cpu(resp->flags);
+ if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
+ !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
+ fw_health->master_func_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period);
+ fw_health->normal_func_wait_dsecs =
+ le32_to_cpu(resp->normal_func_wait_period);
+ fw_health->post_reset_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period_after_reset);
+ fw_health->post_reset_max_wait_dsecs =
+ le32_to_cpu(resp->max_bailout_time_after_reset);
+ fw_health->regs[BNXT_FW_HEALTH_REG] =
+ le32_to_cpu(resp->fw_health_status_reg);
+ fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
+ le32_to_cpu(resp->fw_heartbeat_reg);
+ fw_health->regs[BNXT_FW_RESET_CNT_REG] =
+ le32_to_cpu(resp->fw_reset_cnt_reg);
+ fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
+ le32_to_cpu(resp->reset_inprogress_reg);
+ fw_health->fw_reset_inprog_reg_mask =
+ le32_to_cpu(resp->reset_inprogress_reg_mask);
+ fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
+ if (fw_health->fw_reset_seq_cnt >= 16) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
+ fw_health->fw_reset_seq_regs[i] =
+ le32_to_cpu(resp->reset_reg[i]);
+ fw_health->fw_reset_seq_vals[i] =
+ le32_to_cpu(resp->reset_reg_val[i]);
+ fw_health->fw_reset_seq_delay_msec[i] =
+ resp->delay_after_reset[i];
+ }
+err_recovery_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc)
+ rc = bnxt_map_fw_health_regs(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+}
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -6785,20 +7204,30 @@ qportcfg_exit:
return rc;
}
-static int bnxt_hwrm_ver_get(struct bnxt *bp)
+static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
- int rc;
struct hwrm_ver_get_input req = {0};
- struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
+ int rc;
- bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
+ silent);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;
+ int rc;
+
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = __bnxt_hwrm_ver_get(bp, false);
if (rc)
goto hwrm_ver_get_exit;
@@ -7001,6 +7430,8 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
@@ -7066,8 +7497,6 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
else
return -EINVAL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7087,8 +7516,6 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7971,6 +8398,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_link_info *link_info = &bp->link_info;
+ bp->flags &= ~BNXT_FLAG_EEE_CAP;
+ if (bp->test_info)
+ bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -8292,11 +8722,14 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_fw_init_one(struct bnxt *bp);
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
- bool resc_reinit = false;
+ bool resc_reinit = false, fw_reset = false;
+ u32 flags = 0;
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
@@ -8307,26 +8740,57 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc && (resp->flags &
- cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
- resc_reinit = true;
+ if (!rc)
+ flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ return rc;
- if (up && resc_reinit && BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ if (!up)
+ return 0;
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
+ resc_reinit = true;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ fw_reset = true;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
+ netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
+ return -ENODEV;
}
- return rc;
+ if (resc_reinit || fw_reset) {
+ if (fw_reset) {
+ rc = bnxt_fw_init_one(bp);
+ if (rc) {
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ return rc;
+ }
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "init int mode failed\n");
+ return rc;
+ }
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ if (BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+ }
+ }
+ return 0;
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@ -8336,6 +8800,7 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
+ bp->num_leds = 0;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
return 0;
@@ -8430,6 +8895,7 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
{
u16 handle = 0;
+ bp->wol = 0;
if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
return;
@@ -8476,6 +8942,9 @@ static void bnxt_hwmon_open(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwmon_dev)
+ return;
+
bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
DRV_MODULE_NAME, bp,
bnxt_groups);
@@ -8741,12 +9210,28 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc;
- bnxt_hwrm_if_change(bp, true);
- rc = __bnxt_open_nic(bp, true, true);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_if_change(bp, true);
if (rc)
+ return rc;
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc) {
bnxt_hwrm_if_change(bp, false);
+ } else {
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
+ BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- bnxt_hwmon_open(bp);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ bnxt_hwmon_open(bp);
+ }
return rc;
}
@@ -8783,6 +9268,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
+ pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
+
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -8799,6 +9288,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ /* If we get here, it means firmware reset is in progress
+ * while we are trying to close. We can safely proceed with
+ * the close because we are holding rtnl_lock(). Some firmware
+ * messages may fail as we proceed to close. We set the
+ * ABORT_ERR flag here so that the FW reset thread will later
+ * abort when it gets the rtnl_lock() and sees the flag.
+ */
+ netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ }
+
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
@@ -9306,7 +9807,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & BNXT_FLAG_TPA) {
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
- (flags & BNXT_FLAG_TPA) == 0)
+ (flags & BNXT_FLAG_TPA) == 0 ||
+ (bp->flags & BNXT_FLAG_CHIP_P5))
re_init = true;
}
@@ -9316,9 +9818,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (flags != bp->flags) {
u32 old_flags = bp->flags;
- bp->flags = flags;
-
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return rc;
@@ -9326,12 +9827,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (re_init) {
bnxt_close_nic(bp, false, false);
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return bnxt_open_nic(bp, false, false);
}
if (update_tpa) {
+ bp->flags = flags;
rc = bnxt_set_tpa(bp,
(flags & BNXT_FLAG_TPA) ?
true : false);
@@ -9438,6 +9941,38 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
+static void bnxt_fw_health_check(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 val;
+
+ if (!fw_health || !fw_health->enabled ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ if (fw_health->tmr_counter) {
+ fw_health->tmr_counter--;
+ return;
+ }
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ goto fw_reset;
+
+ fw_health->last_fw_heartbeat = val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ goto fw_reset;
+
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ return;
+
+fw_reset:
+ set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+}
+
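bnxt_fw_health_check() only touches the heartbeat and reset-count registers every tmr_multiplier timer ticks; the multiplier computed in the ERROR_RECOVERY async handler converts the firmware's polling period (deciseconds) into a count of driver-timer periods (jiffies). A worked example, with HZ and the timer period assumed purely for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int hz = 1000;                  /* assumed jiffies per second */
	unsigned int current_interval = hz / 2;  /* assumed driver timer period: 500 ms */
	unsigned int polling_dsecs = 20;         /* firmware asks for a 2 s poll period */
	unsigned int mult;

	mult = DIV_ROUND_UP(polling_dsecs * hz, current_interval * 10);
	printf("read health registers every %u timer tick(s)\n", mult);  /* prints 4 */
	return 0;
}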
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -9449,6 +9984,9 @@ static void bnxt_timer(struct timer_list *t)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ bnxt_fw_health_check(bp);
+
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
@@ -9504,6 +10042,130 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_reset_close(struct bnxt *bp)
+{
+ __bnxt_close_nic(bp, true, false);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+}
+
+static bool is_bnxt_fw_ok(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ bool no_heartbeat = false, has_reset = false;
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ no_heartbeat = true;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ has_reset = true;
+
+ if (!no_heartbeat && has_reset)
+ return true;
+
+ return false;
+}
+
+/* rtnl_lock is acquired before calling this function */
+static void bnxt_force_fw_reset(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 wait_dsecs;
+
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_fw_reset_close(bp);
+ wait_dsecs = fw_health->master_func_wait_dsecs;
+ if (fw_health->master) {
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
+ wait_dsecs = 0;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ } else {
+ bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
+ wait_dsecs = fw_health->normal_func_wait_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ }
+ bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+}
+
+void bnxt_fw_exception(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_rtnl_lock_sp(bp);
+ bnxt_force_fw_reset(bp);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
+/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
+ * < 0 on error.
+ */
+static int bnxt_get_registered_vfs(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ int rc;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
+ return rc;
+ }
+ if (bp->pf.registered_vfs)
+ return bp->pf.registered_vfs;
+ if (bp->sriov_cfg)
+ return 1;
+#endif
+ return 0;
+}
+
+void bnxt_fw_reset(struct bnxt *bp)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+ !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ int n = 0;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->pf.active_vfs &&
+ !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ n = bnxt_get_registered_vfs(bp);
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
+ n);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ goto fw_reset_exit;
+ } else if (n > 0) {
+ u16 vf_tmo_dsecs = n * 10;
+
+ if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
+ bp->fw_reset_max_dsecs = vf_tmo_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_VF;
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ goto fw_reset_exit;
+ }
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ }
+fw_reset_exit:
+ bnxt_rtnl_unlock_sp(bp);
+}
+
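Throughout the new reset path, firmware-supplied wait times are carried in deciseconds and converted to jiffies as dsecs * HZ / 10 when work is queued; bnxt_fw_reset() also stretches fw_reset_max_dsecs to n * 10 dsecs so each registered VF gets roughly one second to unregister. A small arithmetic sketch with assumed values, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int hz = 250;                   /* assumed CONFIG_HZ for illustration */
	unsigned int fw_reset_min_dsecs = 6;     /* hypothetical minimum wait from RESET_NOTIFY */
	unsigned int n_vfs = 4;                  /* hypothetical registered VFs */

	printf("initial reset delay: %u jiffies\n", fw_reset_min_dsecs * hz / 10);
	printf("VF drain timeout raised to: %u dsecs\n", n_vfs * 10);
	return 0;
}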
static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
@@ -9634,6 +10296,15 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
+ bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+
+ if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
+ if (!is_bnxt_fw_ok(bp))
+ bnxt_devlink_health_report(bp,
+ BNXT_FW_EXCEPTION_SP_EVENT);
+ }
+
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
@@ -9728,6 +10399,308 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+static int bnxt_fw_init_one_p1(struct bnxt *bp)
+{
+ int rc;
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ return rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ return rc;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_fw_set_time(bp);
+ return 0;
+}
+
+static int bnxt_fw_init_one_p2(struct bnxt *bp)
+{
+ int rc;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ return -ENODEV;
+
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_vnic_qcaps(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
+ bnxt_dcb_init(bp);
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ }
+}
+
+static void bnxt_set_dflt_rfs(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+
+ dev->hw_features &= ~NETIF_F_NTUPLE;
+ dev->features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ if (bnxt_rfs_supported(bp)) {
+ dev->hw_features |= NETIF_F_NTUPLE;
+ if (bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ dev->features |= NETIF_F_NTUPLE;
+ }
+ }
+}
+
+static void bnxt_fw_init_one_p3(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bnxt_set_dflt_rss_hash_type(bp);
+ bnxt_set_dflt_rfs(bp);
+
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+}
+
+static int bnxt_fw_init_one(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_fw_init_one_p1(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 1 failed\n");
+ return rc;
+ }
+ rc = bnxt_fw_init_one_p2(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ return rc;
+ }
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ if (rc)
+ return rc;
+ bnxt_fw_init_one_p3(bp);
+ return 0;
+}
+
+static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
+ u32 val = fw_health->fw_reset_seq_vals[reg_idx];
+ u32 reg_type, reg_off, delay_msecs;
+
+ delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_write_config_dword(bp->pdev, reg_off, val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ writel(reg_off & BNXT_GRC_BASE_MASK,
+ bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ writel(val, bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ writel(val, bp->bar1 + reg_off);
+ break;
+ }
+ if (delay_msecs) {
+ pci_read_config_dword(bp->pdev, 0, &val);
+ msleep(delay_msecs);
+ }
+}
+
+static void bnxt_reset_all(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int i;
+
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
+ bnxt_fw_reset_writel(bp, i);
+ } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
+ struct hwrm_fw_reset_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ }
+ bp->fw_reset_timestamp = jiffies;
+}
+
+static void bnxt_fw_reset_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
+ int rc;
+
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
+ return;
+ }
+
+ switch (bp->fw_reset_state) {
+ case BNXT_FW_RESET_STATE_POLL_VF: {
+ int n = bnxt_get_registered_vfs(bp);
+
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
+ n, jiffies_to_msecs(jiffies -
+ bp->fw_reset_timestamp));
+ goto fw_reset_abort;
+ } else if (n > 0) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
+ n);
+ return;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ bp->fw_reset_timestamp = jiffies;
+ rtnl_lock();
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ rtnl_unlock();
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_RESET_FW: {
+ u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
+
+ bnxt_reset_all(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_ENABLE_DEV:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ bp->fw_health) {
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_INPROG_REG);
+ if (val)
+ netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
+ val);
+ }
+ clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ if (pci_enable_device(bp->pdev)) {
+ netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ goto fw_reset_abort;
+ }
+ pci_set_master(bp->pdev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_POLL_FW:
+ bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ if (rc) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ netdev_err(bp->dev, "Firmware reset aborted\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 5);
+ return;
+ }
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_OPENING:
+ while (!rtnl_trylock()) {
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ rc = bnxt_open(bp->dev);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_open_nic() failed\n");
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ }
+ bnxt_ulp_irq_restart(bp, rc);
+ rtnl_unlock();
+
+ bp->fw_reset_state = 0;
+ /* Make sure fw_reset_state is 0 before clearing the flag */
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ break;
+ }
+ return;
+
+fw_reset_abort:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ rtnl_lock();
+ dev_close(bp->dev);
+ rtnl_unlock();
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -9790,6 +10763,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
pci_enable_pcie_error_reporting(pdev);
INIT_WORK(&bp->sp_task, bnxt_sp_task);
+ INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
@@ -10333,7 +11307,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
free_netdev(dev);
}
-static int bnxt_probe_phy(struct bnxt *bp)
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
@@ -10344,8 +11318,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
- mutex_init(&bp->link_lock);
-
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -10359,6 +11331,9 @@ static int bnxt_probe_phy(struct bnxt *bp)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
+ if (!fw_dflt)
+ return 0;
+
 /* initialize the ethtool setting copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -10379,7 +11354,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
else
link_info->req_flow_ctrl = link_info->force_pause_setting;
- return rc;
+ return 0;
}
static int bnxt_get_max_irq(struct pci_dev *pdev)
@@ -10683,32 +11658,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
- rc = bnxt_hwrm_ver_get(bp);
+ mutex_init(&bp->link_lock);
+
+ rc = bnxt_fw_init_one_p1(bp);
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
- rc = bnxt_alloc_kong_hwrm_resources(bp);
- if (rc)
- bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
- }
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
- rc = bnxt_alloc_hwrm_short_cmd_req(bp);
- if (rc)
- goto init_err_pci_clean;
- }
-
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
- rc = bnxt_hwrm_func_reset(bp);
+ rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
- bnxt_hwrm_fw_set_time(bp);
-
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -10746,41 +11708,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else if (BNXT_CHIP_P5(bp))
+ bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- goto init_err_pci_clean;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
- if (rc)
- goto init_err_pci_clean;
-
bp->ulp_probe = bnxt_ulp_probe;
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
- /* Get the MAX capabilities for this function */
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
-
- rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
- rc);
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10794,17 +11729,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
}
- bnxt_hwrm_func_qcfg(bp);
- bnxt_hwrm_vnic_qcaps(bp);
- bnxt_hwrm_port_led_qcaps(bp);
- bnxt_ethtool_init(bp);
- bnxt_dcb_init(bp);
/* MTU range: 60 - FW defined max */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = bp->max_mtu;
- rc = bnxt_probe_phy(bp);
+ rc = bnxt_probe_phy(bp, true);
if (rc)
goto init_err_pci_clean;
@@ -10818,24 +11748,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
- /* Default RSS hash cfg. */
- bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
- bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
- }
-
- if (bnxt_rfs_supported(bp)) {
- dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- dev->features |= NETIF_F_NTUPLE;
- }
- }
+ bnxt_fw_init_one_p3(bp);
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
@@ -10849,16 +11762,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- bnxt_get_wol_settings(bp);
- if (bp->flags & BNXT_FLAG_WOL_CAP)
- device_set_wakeup_enable(&pdev->dev, bp->wol);
- else
- device_set_wakeup_capable(&pdev->dev, false);
-
- bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
-
- bnxt_hwrm_coal_params_qcaps(bp);
-
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -10895,6 +11798,8 @@ init_err_pci_clean:
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->fw_health);
+ bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
@@ -10934,8 +11839,7 @@ shutdown_exit:
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
@@ -10951,8 +11855,7 @@ static int bnxt_suspend(struct device *device)
static int bnxt_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16694b704d15..333b0a85a7fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -113,6 +113,7 @@ struct tx_cmp {
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -263,14 +264,21 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
+ #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
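+/* On P5 chips, TPA aggregation completions carry a 16-bit aggregation ID
+ * in bits 16..31 of rx_agg_cmp_v; TPA_AGG_AGG_ID() extracts it so the
+ * completion can be matched to its in-progress TPA session.
+ */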
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
struct rx_tpa_start_cmp {
__le32 rx_tpa_start_cmp_len_flags_type;
#define RX_TPA_START_CMP_TYPE (0x3f << 0)
#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
@@ -278,6 +286,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
@@ -291,6 +300,8 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -308,6 +319,14 @@ struct rx_tpa_start_cmp {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+#define TPA_START_AGG_ID_P5(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
__le32 rx_tpa_start_cmp_hdr_info;
@@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext {
(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -361,6 +395,8 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
#define RX_TPA_END_GRO_TS (0x1 << 31)
@@ -370,6 +406,18 @@ struct rx_tpa_end_cmp {
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+#define TPA_END_AGG_ID_P5(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
#define TPA_END_TPA_SEGS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
@@ -389,6 +437,10 @@ struct rx_tpa_end_cmp {
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16
+ #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24
__le32 rx_tpa_end_cmp_seg_len;
#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
@@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
#define RX_TPA_END_CMP_ERRORS (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
u32 rx_tpa_end_cmp_start_opaque;
};
@@ -405,6 +463,28 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
+
+#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -487,6 +567,9 @@ struct nqe_cn {
#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
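+/* P5 chips use a larger aggregation ID space; the driver tracks up to
+ * MAX_TPA_P5 concurrent TPA aggregations per RX ring.
+ */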
+#define MAX_TPA_P5 256
+#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1)
+#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
#define MAX_RX_PAGES 1
@@ -562,6 +645,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
@@ -768,6 +852,15 @@ struct bnxt_tpa_info {
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ struct rx_agg_cmp *agg_arr;
+};
+
+#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
+
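+/* Maps a hardware-assigned aggregation ID to a driver-side index below
+ * MAX_TPA_P5; agg_idx_bmap tracks which indices are currently in use.
+ */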
+struct bnxt_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
};
struct bnxt_rx_ring_info {
@@ -797,6 +890,7 @@ struct bnxt_rx_ring_info {
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
struct bnxt_tpa_info *rx_tpa;
+ struct bnxt_tpa_idx_map *rx_tpa_idx_map;
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
@@ -978,6 +1072,7 @@ struct bnxt_pf_info {
u8 mac_addr[ETH_ALEN];
u32 first_vf_id;
u16 active_vfs;
+ u16 registered_vfs;
u16 max_vfs;
u32 max_encap_records;
u32 max_decap_records;
@@ -1137,6 +1232,9 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_BASE_MASK 0xfffff000
+#define BNXT_GRC_OFFSET_MASK 0x00000ffc
+
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1253,6 +1351,53 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info *tqm_mem[9];
};
+struct bnxt_fw_health {
+ u32 flags;
+ u32 polling_dsecs;
+ u32 master_func_wait_dsecs;
+ u32 normal_func_wait_dsecs;
+ u32 post_reset_wait_dsecs;
+ u32 post_reset_max_wait_dsecs;
+ u32 regs[4];
+ u32 mapped_regs[4];
+#define BNXT_FW_HEALTH_REG 0
+#define BNXT_FW_HEARTBEAT_REG 1
+#define BNXT_FW_RESET_CNT_REG 2
+#define BNXT_FW_RESET_INPROG_REG 3
+ u32 fw_reset_inprog_reg_mask;
+ u32 last_fw_heartbeat;
+ u32 last_fw_reset_cnt;
+ u8 enabled:1;
+ u8 master:1;
+ u8 tmr_multiplier;
+ u8 tmr_counter;
+ u8 fw_reset_seq_cnt;
+ u32 fw_reset_seq_regs[16];
+ u32 fw_reset_seq_vals[16];
+ u32 fw_reset_seq_delay_msec[16];
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *fw_reset_reporter;
+ struct devlink_health_reporter *fw_fatal_reporter;
+};
+
+struct bnxt_fw_reporter_ctx {
+ unsigned long sp_event;
+};
+
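+/* Firmware describes each health register with the address space encoded
+ * in the low 2 bits (CFG/GRC/BAR0/BAR1) and the offset in the remaining
+ * bits; the helpers below split the two before the register is read.
+ */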
+#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
+#define BNXT_FW_HEALTH_REG_TYPE_CFG 0
+#define BNXT_FW_HEALTH_REG_TYPE_GRC 1
+#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2
+#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3
+
+#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
+#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
+
+#define BNXT_FW_HEALTH_WIN_BASE 0x3000
+#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+
+#define BNXT_FW_STATUS_HEALTHY 0x8000
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1282,7 +1427,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
@@ -1379,12 +1526,14 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- !(bp->flags & BNXT_FLAG_CHIP_P5) && \
- !is_kdump_kernel())
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (bp)->max_tpa_v2) && !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
- ((bp)->chip_num == CHIP_NUM_57500)
+ ((bp)->chip_num == CHIP_NUM_57508 || \
+ (bp)->chip_num == CHIP_NUM_57504 || \
+ (bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1414,6 +1563,8 @@ struct bnxt {
u16, void *, u8 *, dma_addr_t,
unsigned int);
+ u16 max_tpa_v2;
+ u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u16 rx_offset;
@@ -1469,6 +1620,10 @@ struct bnxt {
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
+#define BNXT_STATE_FW_RESET_DET 3
+#define BNXT_STATE_IN_FW_RESET 4
+#define BNXT_STATE_ABORT_ERR 5
+#define BNXT_STATE_FW_FATAL_COND 6
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1493,6 +1648,7 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
@@ -1525,6 +1681,7 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_valid;
@@ -1576,6 +1733,24 @@ struct bnxt {
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
#define BNXT_RING_COAL_NOW_SP_EVENT 17
+#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
+#define BNXT_FW_EXCEPTION_SP_EVENT 19
+
+ struct delayed_work fw_reset_task;
+ int fw_reset_state;
+#define BNXT_FW_RESET_STATE_POLL_VF 1
+#define BNXT_FW_RESET_STATE_RESET_FW 2
+#define BNXT_FW_RESET_STATE_ENABLE_DEV 3
+#define BNXT_FW_RESET_STATE_POLL_FW 4
+#define BNXT_FW_RESET_STATE_OPENING 5
+
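+ /* Firmware-supplied reset wait times, in deciseconds (1 dsec = 100 ms) */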
+ u16 fw_reset_min_dsecs;
+#define BNXT_DFLT_FW_RST_MIN_DSECS 20
+ u16 fw_reset_max_dsecs;
+#define BNXT_DFLT_FW_RST_MAX_DSECS 60
+ unsigned long fw_reset_timestamp;
+
+ struct bnxt_fw_health *fw_health;
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1637,7 +1812,6 @@ struct bnxt {
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
- struct dentry *debugfs_dim;
struct device *hwmon_dev;
};
@@ -1782,6 +1956,7 @@ extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
@@ -1814,6 +1989,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_fw_exception(struct bnxt *bp);
+void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 07301cb87c03..fb6f30d0d1d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -377,8 +377,6 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
@@ -391,6 +389,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
struct hwrm_queue_dscp_qcaps_input req = {0};
int rc;
+ bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
@@ -433,8 +432,6 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
return rc;
@@ -722,6 +719,7 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
void bnxt_dcb_init(struct bnxt *bp)
{
+ bp->dcbx_cap = 0;
if (bp->hwrm_spec_code < 0x10501)
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 61393f351a77..156c2404854f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -61,45 +61,30 @@ static const struct file_operations debugfs_dim_fops = {
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
- struct dentry *dd)
+static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
+ struct dentry *dd)
{
static char qname[16];
snprintf(qname, 10, "%d", ring_idx);
- return debugfs_create_file(qname, 0600, dd,
- dim, &debugfs_dim_fops);
+ debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
- struct dentry *pdevf;
+ struct dentry *dir;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
- if (bp->debugfs_pdev) {
- pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (!pdevf) {
- pr_err("failed to create debugfs entry %s/dim\n",
- pname);
- return;
- }
- bp->debugfs_dim = pdevf;
- /* create files for each rx ring */
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ dir = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (cpr && bp->bnapi[i]->rx_ring) {
- pdevf = debugfs_dim_ring_init(&cpr->dim, i,
- bp->debugfs_dim);
- if (!pdevf)
- pr_err("failed to create debugfs entry %s/dim/%d\n",
- pname, i);
- }
- }
- } else {
- pr_err("failed to create debugfs entry %s\n", pname);
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring)
+ debugfs_dim_ring_init(&cpr->dim, i, dir);
}
}
@@ -114,8 +99,6 @@ void bnxt_debug_dev_exit(struct bnxt *bp)
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
- if (!bnxt_debug_mnt)
- pr_err("failed to init bnxt_en debugfs\n");
}
void bnxt_debug_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index c05d663212b2..e664392dccc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -15,6 +15,192 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
+static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_health *health = bp->fw_health;
+ u32 val, health_status;
+ int rc;
+
+ if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return 0;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ health_status = val & 0xffff;
+
+ if (health_status == BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Healthy;");
+ if (rc)
+ return rc;
+ } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Not yet completed initialization;");
+ if (rc)
+ return rc;
+ } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Encountered fatal error and cannot recover;");
+ if (rc)
+ return rc;
+ }
+
+ if (val >> 16) {
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+ if (rc)
+ return rc;
+ }
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_reporter_diagnose,
+};
+
+static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ bnxt_fw_reset(bp);
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
+ .name = "fw_reset",
+ .recover = bnxt_fw_reset_recover,
+};
+
+static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
+ unsigned long event;
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ event = fw_reporter_ctx->sp_event;
+ if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
+ bnxt_fw_reset(bp);
+ else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
+ bnxt_fw_exception(bp);
+
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = bnxt_fw_fatal_recover,
+};
+
+static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
+ 0, false, bp);
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
+ }
+
+ health->fw_reset_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_reset_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_reset_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW reset health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reset_reporter));
+ health->fw_reset_reporter = NULL;
+ }
+
+ health->fw_fatal_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_fatal_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_fatal_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_fatal_reporter));
+ health->fw_fatal_reporter = NULL;
+ }
+}
+
+static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ if (health->fw_reporter)
+ devlink_health_reporter_destroy(health->fw_reporter);
+
+ if (health->fw_reset_reporter)
+ devlink_health_reporter_destroy(health->fw_reset_reporter);
+
+ if (health->fw_fatal_reporter)
+ devlink_health_reporter_destroy(health->fw_fatal_reporter);
+}
+
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_fw_reporter_ctx fw_reporter_ctx;
+
+ if (!fw_health)
+ return;
+
+ fw_reporter_ctx.sp_event = event;
+ switch (event) {
+ case BNXT_FW_RESET_NOTIFY_SP_EVENT:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal async event received",
+ &fw_reporter_ctx);
+ return;
+ }
+ if (!fw_health->fw_reset_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_reset_reporter,
+ "FW non-fatal reset event received",
+ &fw_reporter_ctx);
+ return;
+
+ case BNXT_FW_EXCEPTION_SP_EVENT:
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal error reported",
+ &fw_reporter_ctx);
+ return;
+ }
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -109,13 +295,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
- return -EACCES;
- } else if (rc) {
- return -EIO;
- }
- return 0;
+ return rc;
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
@@ -251,6 +433,8 @@ int bnxt_dl_register(struct bnxt *bp)
devlink_params_publish(dl);
+ bnxt_dl_fw_reporters_create(bp);
+
return 0;
err_dl_port_unreg:
@@ -273,6 +457,7 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ bnxt_dl_fw_reporters_destroy(bp);
devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 5b6b2c7d97cf..b97e0baeb42d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -55,6 +55,7 @@ struct bnxt_dl_nvm_param {
u16 num_bits;
};
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8445a0cce849..235265eeec7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,44 @@ reset_coalesce:
return rc;
}
-#define BNXT_NUM_STATS 22
+static const char * const bnxt_ring_stats_str[] = {
+ "rx_ucast_packets",
+ "rx_mcast_packets",
+ "rx_bcast_packets",
+ "rx_discards",
+ "rx_drops",
+ "rx_ucast_bytes",
+ "rx_mcast_bytes",
+ "rx_bcast_bytes",
+ "tx_ucast_packets",
+ "tx_mcast_packets",
+ "tx_bcast_packets",
+ "tx_discards",
+ "tx_drops",
+ "tx_ucast_bytes",
+ "tx_mcast_bytes",
+ "tx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tpa_stats_str[] = {
+ "tpa_packets",
+ "tpa_bytes",
+ "tpa_events",
+ "tpa_aborts",
+};
+
+static const char * const bnxt_ring_tpa2_stats_str[] = {
+ "rx_tpa_eligible_pkt",
+ "rx_tpa_eligible_bytes",
+ "rx_tpa_pkt",
+ "rx_tpa_bytes",
+ "rx_tpa_errors",
+};
+
+static const char * const bnxt_ring_sw_stats_str[] = {
+ "rx_l4_csum_errors",
+ "missed_irqs",
+};
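+/* The per-ring string tables above must match the counter order in
+ * struct ctx_hw_stats (legacy TPA stats) and struct ctx_hw_stats_ext
+ * (TPA v2 stats); bnxt_get_strings() picks the TPA table based on
+ * bp->max_tpa_v2.
+ */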
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -207,6 +244,20 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
+
#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
__stringify(counter##_pri##n) }
@@ -352,6 +403,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};
static const struct {
@@ -417,9 +469,29 @@ static const struct {
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
+static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
+{
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ if (bp->max_tpa_v2)
+ return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ }
+ return 0;
+}
+
+static int bnxt_get_num_ring_stats(struct bnxt *bp)
+{
+ int num_stats;
+
+ num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
+ ARRAY_SIZE(bnxt_ring_sw_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
+ return num_stats * bp->cp_nr_rings;
+}
+
static int bnxt_get_num_stats(struct bnxt *bp)
{
- int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ int num_stats = bnxt_get_num_ring_stats(bp);
num_stats += BNXT_NUM_SW_FUNC_STATS;
@@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+ u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
if (!bp->bnapi) {
- j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
goto skip_ring_stats;
}
@@ -551,56 +624,39 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- u32 i;
+ static const char * const *str;
+ u32 i, j, num_str;
switch (stringset) {
- /* The number of strings must match BNXT_NUM_STATS defined above. */
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- sprintf(buf, "[%d]: rx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_events", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_aborts", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_l4_csum_errors", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: missed_irqs", i);
- buf += ETH_GSTRING_LEN;
+ num_str = ARRAY_SIZE(bnxt_ring_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ if (!BNXT_SUPPORTS_TPA(bp))
+ goto skip_tpa_stats;
+
+ if (bp->max_tpa_v2) {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ str = bnxt_ring_tpa2_stats_str;
+ } else {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ str = bnxt_ring_tpa_stats_str;
+ }
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i, str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+skip_tpa_stats:
+ num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -1643,6 +1699,11 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static void bnxt_print_admin_err(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
+}
+
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
@@ -1682,13 +1743,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -1738,13 +1794,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to reset the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2039,13 +2090,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
- if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (hwrm_err) {
- rc = -EOPNOTSUPP;
- }
+ if (hwrm_err == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2584,8 +2630,6 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -3306,6 +3350,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
+ bp->num_tests = 0;
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -3315,7 +3360,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (rc)
goto ethtool_init_exit;
- test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ test_info = bp->test_info;
+ if (!test_info)
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
goto ethtool_init_exit;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 12bbb2a207d0..2cdef753a1bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,8 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2019 Broadcom Limited
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2019 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,15 +40,15 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY
/* tlv (size:64b/8B) */
@@ -267,7 +268,6 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -313,6 +313,7 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -410,8 +411,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 69
-#define HWRM_VERSION_STR "1.10.0.69"
+#define HWRM_VERSION_RSVD 89
+#define HWRM_VERSION_STR "1.10.0.89"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2916,7 +2921,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2624b/328B) */
+/* rx_port_stats_ext (size:3648b/456B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2959,6 +2964,22 @@ struct rx_port_stats_ext {
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output {
u8 valid;
};
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
@@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output {
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
- u8 unused_1;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input {
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
@@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
- u8 unused_0[7];
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
u8 valid;
};
@@ -6567,6 +6613,31 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
+/* ctx_hw_stats_ext (size:1344b/168B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+};
+
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
@@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input {
__le32 update_period_ms;
u8 stat_ctx_flags;
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 stats_dma_length;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -7204,7 +7276,9 @@ struct coredump_segment_record {
u8 version_hi;
u8 version_low;
u8 seg_flags;
- u8 unused_0[7];
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[6];
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input {
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2b90a2bb1a1d..f6f3454d6059 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -25,7 +25,6 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
@@ -40,23 +39,10 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -133,7 +119,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
vf->func_qcfg_flags = le16_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -164,9 +150,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -EIO;
- return 0;
+ return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -486,10 +470,43 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+/* Caller holds bp->hwrm_cmd_lock mutex lock */
+static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_vf_info *vf;
+
+ vf = &bp->pf.vf[vf_id];
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+ }
+ if (vf->vlan) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = cpu_to_le16(vf->vlan);
+ }
+ if (vf->max_tx_rate) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(vf->max_tx_rate);
+#ifdef HAVE_IFLA_TX_RATE
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(vf->min_tx_rate);
+#endif
+ }
+ if (vf->flags & BNXT_VF_TRUST)
+ req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
-static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
struct hwrm_func_vf_resource_cfg_input req = {0};
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -561,13 +578,14 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ if (reset)
+ __bnxt_set_vf_params(bp, i);
+
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
break;
- }
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
@@ -664,8 +682,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -ENOMEM;
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -679,14 +695,40 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
return rc;
}
-static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
if (BNXT_NEW_RM(bp))
- return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
}
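+/* Shared by the initial SR-IOV enable path and firmware-reset recovery:
+ * when @reset is true, previously configured per-VF parameters (MAC,
+ * VLAN, rate limits, trust) are re-applied while re-reserving resources.
+ */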
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ int rc;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ return rc;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_func_cfg(bp, *num_vfs, reset);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ return rc;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
+ rc);
+ *num_vfs = rc;
+ }
+
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+ return 0;
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
@@ -752,25 +794,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out1;
- /* Reserve resources for VFs */
- rc = bnxt_func_cfg(bp, *num_vfs);
- if (rc != *num_vfs) {
- if (rc <= 0) {
- netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
- *num_vfs = 0;
- goto err_out2;
- }
- netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
- *num_vfs = rc;
- }
-
- /* Register buffers for VFs */
- rc = bnxt_hwrm_func_buf_rgtr(bp);
+ rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
if (rc)
goto err_out2;
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
-
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -837,6 +864,11 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
rtnl_unlock();
return 0;
}
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ rtnl_unlock();
+ return 0;
+ }
bp->sriov_cfg = true;
rtnl_unlock();
@@ -870,7 +902,6 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_fwd_resp_input req = {0};
- struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -885,22 +916,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req.encap_resp, encap_resp, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
- goto fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -909,7 +927,6 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_reject_fwd_resp_input req = {0};
- struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -920,22 +937,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
- goto fwd_err_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_err_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -944,7 +948,6 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_exec_fwd_resp_input req = {0};
- struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -955,22 +958,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
- goto exec_fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-exec_fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -1190,6 +1180,13 @@ mac_done:
}
#else
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ if (*num_vfs)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
void bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 2eed9eda1195..629641bf6fc5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -36,6 +36,7 @@ int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index dd621f6bd127..c8062d020d1e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -319,8 +319,6 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -515,11 +513,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
- rc = -ENOSPC;
- else if (rc)
- rc = -EIO;
return rc;
}
@@ -591,8 +584,6 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -609,8 +600,6 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -660,8 +649,6 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -678,8 +665,6 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -1457,8 +1442,6 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fc77caf0a076..b2c160947fc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -226,6 +226,9 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct input *req;
int rc;
+ if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ return -EBUSY;
+
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 57dc3cbff36e..155599dcee76 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
+ int i;
cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
+ atomic_set(&cp->csk_tbl[i].ref_count, 0);
+
port_id = prandom_u32();
port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
@@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
+ atomic_set(&cdev->ref_count, 0);
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b22196880d6d..12cb77ef1081 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2516,19 +2516,14 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
struct netdev_queue *txq;
- struct sk_buff *skb;
- struct enet_cb *cb;
int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++) {
- cb = priv->tx_cbs + i;
- skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
- if (skb)
- dev_kfree_skb(skb);
- }
+ for (i = 0; i < priv->num_tx_bds; i++)
+ dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
+ priv->tx_cbs + i));
for (i = 0; i < priv->hw_params->tx_queues; i++) {
txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
@@ -3438,7 +3433,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
unsigned int i;
int err = -EIO;
const char *phy_mode_str;
@@ -3478,8 +3472,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
macaddr = pd->mac_address;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
goto err;
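
The bcmgenet probe change above (and the cs89x0 and nps_enet changes later in this diff) fold platform_get_resource() plus devm_ioremap_resource() into the single devm_platform_ioremap_resource() helper. A minimal sketch of the idiom, assuming a hypothetical probe function:

#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment: map MEM resource 0 with one call. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* resource missing or ioremap failed */

	return 0;
}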
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4c404d2213f9..77f3511b97de 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
@@ -18098,8 +18097,7 @@ unlock:
static int tg3_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7767ae6fa1fd..e338272931d1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
head_unmap->nvecs++;
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 99f49d059414..f96a42af1014 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
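
The bnad, xgmac and liquidio hunks around here all stop poking skb_frag_struct fields (frag->size, frag->page_offset, frag->page.p) and switch to the accessor helpers. A minimal, hypothetical TX-mapping loop showing the idiom (error unwinding omitted for brevity):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int example_map_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int len = skb_frag_size(frag);	/* was frag->size */
		dma_addr_t map;

		/* replaces dma_map_page(dev, frag->page.p,
		 * frag->page_offset, frag->size, DMA_TO_DEVICE)
		 */
		map = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, map))
			return -ENOMEM;
		/* skb_frag_off(frag) replaces frag->page_offset where the
		 * raw offset is still needed.
		 */
	}
	return 0;
}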
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab805579f96..7f3b2e3b0868 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
int i, frags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
spin_lock(&lio->glist_lock[q_idx]);
@@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
@@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index db0b90555acb..370d76822ee0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
int i, frags;
@@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
dma_unmap_single(&oct->pci_dev->dev,
@@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 021d99cd1665..614d07be7181 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -260,9 +260,7 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
dev_info(&oct->pci_dev->dev,
"got a request for FLR from VF that owns DPI ring %u\n",
mbox->q_no);
- pcie_capability_set_word(
- oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
- PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
break;
case OCTEON_PF_CHANGED_VF_MACADDR:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 192bc92da881..4ab57d33a87e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
goto doorbell;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
- frag->page_offset, size,
+ skb_frag_off(frag), size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 89db739b7819..6dabbf1502c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag;
+ skb_frag_t *rx_frag;
int nr_frags;
int offset = 0;
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
- rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
skb_frag_size_set(rx_frag, len);
skb->len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index d692251ee252..ae6a47dd7dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3531,7 +3531,6 @@ int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size = 0;
- struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, 0400, 0 },
@@ -3642,8 +3641,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
- &flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
debugfs_create_bool("use_backdoor", 0600,
adap->debugfs_root, &adap->use_bd);
debugfs_create_bool("trace_rss", 0600,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4311ad9c84b2..71854a19cebe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6269,10 +6269,7 @@ static int __init cxgb4_init_module(void)
{
int ret;
- /* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index eaf1fb74689c..01c65d13fc0e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
- atomic_set(&s->smtab[i].refcnt, 0);
+ s->smtab[i].refcnt = 0;
}
return s;
}
@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (atomic_read(&e->refcnt) == 0) {
+ if (e->refcnt == 0) {
if (!first_free)
first_free = e;
} else {
@@ -97,11 +97,9 @@ found_reuse:
static void t4_smte_free(struct smt_entry *e)
{
- spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->refcnt == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
- spin_unlock_bh(&e->lock);
}
/**
@@ -111,8 +109,10 @@ static void t4_smte_free(struct smt_entry *e)
*/
void cxgb4_smt_release(struct smt_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ spin_lock_bh(&e->lock);
+ if ((--e->refcnt) == 0)
t4_smte_free(e);
+ spin_unlock_bh(&e->lock);
}
EXPORT_SYMBOL(cxgb4_smt_release);
@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
- if (!atomic_read(&e->refcnt)) {
- atomic_set(&e->refcnt, 1);
+ if (!e->refcnt) {
+ e->refcnt = 1;
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
- atomic_inc(&e->refcnt);
+ ++e->refcnt;
}
spin_unlock(&e->lock);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index d6c2cc271398..1268d6e93a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -59,7 +59,7 @@ struct smt_entry {
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
- atomic_t refcnt;
+ int refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
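
With the change above, every refcnt access in smt.c happens under e->lock, so the atomic_t only duplicated that serialization and a plain int is sufficient as long as the lock is always held. Generic form of the pattern (illustrative only, hypothetical names):

#include <linux/spinlock.h>

struct example_entry {
	spinlock_t lock;
	int refcnt;		/* protected by lock */
};

static void example_put(struct example_entry *e)
{
	spin_lock_bh(&e->lock);
	--e->refcnt;
	if (e->refcnt == 0) {
		/* free or mark unused while still holding the lock */
	}
	spin_unlock_bh(&e->lock);
}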
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 6d4cf3d0b2f0..f6fc0875d5b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2478,11 +2478,10 @@ static int setup_debugfs(struct adapter *adapter)
* Debugfs support is best effort.
*/
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- (void)debugfs_create_file(debugfs_files[i].name,
- debugfs_files[i].mode,
- adapter->debugfs_root,
- (void *)adapter,
- debugfs_files[i].fops);
+ debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root, (void *)adapter,
+ debugfs_files[i].fops);
return 0;
}
@@ -3257,11 +3256,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (IS_ERR_OR_NULL(adapter->debugfs_root))
- dev_warn(&pdev->dev, "could not create debugfs"
- " directory");
- else
- setup_debugfs(adapter);
+ setup_debugfs(adapter);
}
/*
@@ -3486,13 +3481,11 @@ static int __init cxgb4vf_module_init(void)
return -EINVAL;
}
- /* Debugfs support is optional, just warn if this fails */
+ /* Debugfs support is optional; debugfs will warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ if (ret < 0)
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
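
The cxgb4/cxgb4vf cleanups above follow the current debugfs convention: creation failures are handled inside debugfs itself, and an error/NULL dentry is a safe parent for later debugfs_create_*() calls, so drivers simply stop checking the return values. A hypothetical init path in the same style (example_stats_fops is assumed to exist):

#include <linux/debugfs.h>

static const struct file_operations example_stats_fops;	/* hypothetical */
static struct dentry *example_root;

static void example_debugfs_init(void *priv)
{
	/* no error checks: a failed dir is a safe parent for later calls */
	example_root = debugfs_create_dir("example", NULL);
	debugfs_create_file("stats", 0444, example_root, priv,
			    &example_stats_fops);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_root);
}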
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b3e7fafee3df..c9aebcde403a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1844,16 +1844,12 @@ cleanup_module(void)
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- struct net_local *lp;
- struct resource *mem_res;
void __iomem *virt_addr;
int err;
if (!dev)
return -ENOMEM;
- lp = netdev_priv(dev);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq <= 0) {
dev_warn(&dev->dev, "interrupt resource missing\n");
@@ -1861,8 +1857,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
goto free;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virt_addr = devm_ioremap_resource(&pdev->dev, mem_res);
+ virt_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virt_addr)) {
err = PTR_ERR(virt_addr);
goto free;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9003eb6716cd..e736ce2c58ca 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
buflen = skb_headlen(skb);
} else {
skb_frag = skb_si->frags + frag;
- buffer = page_address(skb_frag_page(skb_frag)) +
- skb_frag->page_offset;
- buflen = skb_frag->size;
+ buffer = skb_frag_address(skb_frag);
+ buflen = skb_frag_size(skb_frag);
}
if (frag == last_frag) {
@@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no IRQ\n");
+ if (irq <= 0)
return irq ? irq : -ENODEV;
- }
port->irq = irq;
/* Clock the port */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 386bdc1378d1..cce90b5925d9 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0) {
- dev_err(db->dev, "interrupt resource unavailable: %d\n",
- ndev->irq);
ret = ndev->irq;
goto out;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f287b5da5546..cf3e6f2892ff 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -192,7 +192,6 @@ struct be_eq_obj {
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
@@ -589,6 +588,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
+ bool aic_enabled;
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 492f8769ac12..5bb5abf99588 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -329,8 +329,8 @@ static int be_get_coalesce(struct net_device *netdev,
et->tx_coalesce_usecs_high = aic->max_eqd;
et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = aic->enable;
- et->use_adaptive_tx_coalesce = aic->enable;
+ et->use_adaptive_rx_coalesce = adapter->aic_enabled;
+ et->use_adaptive_tx_coalesce = adapter->aic_enabled;
return 0;
}
@@ -346,8 +346,9 @@ static int be_set_coalesce(struct net_device *netdev,
struct be_eq_obj *eqo;
int i;
+ adapter->aic_enabled = et->use_adaptive_rx_coalesce;
+
for_all_evt_queues(adapter, eqo, i) {
- aic->enable = et->use_adaptive_rx_coalesce;
aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d8e40ac66d2..39eb7d525043 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -2147,7 +2147,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
int i;
aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
+ if (!adapter->aic_enabled) {
if (aic->jiffies)
aic->jiffies = 0;
eqd = aic->et_eqd;
@@ -2204,7 +2204,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
int eqd;
u32 mult_enc;
- if (!aic->enable)
+ if (!adapter->aic_enabled)
return 0;
if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
- skb_shinfo(skb)->frags[0].page_offset =
- page_info->page_offset + hdr_len;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+ page_info->page_offset + hdr_len);
skb_frag_size_set(&skb_shinfo(skb)->frags[0],
curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
/* First frag or Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
@@ -2959,6 +2959,8 @@ static int be_evt_queues_create(struct be_adapter *adapter)
max(adapter->cfg_num_rx_irqs,
adapter->cfg_num_tx_irqs));
+ adapter->aic_enabled = true;
+
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2966,7 +2968,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eqo->adapter = adapter;
eqo->idx = i;
aic->max_eqd = BE_MAX_EQD;
- aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 027225e1ade2..815fb62c4b02 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -576,7 +576,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
struct nps_enet_priv *priv;
s32 err = 0;
const char *mac_addr;
- struct resource *res_regs;
if (!dev->of_node)
return -ENODEV;
@@ -595,8 +594,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* FIXME :: no multicast support yet */
ndev->flags &= ~IFF_MULTICAST;
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs_base = devm_ioremap_resource(dev, res_regs);
+ priv->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs_base)) {
err = PTR_ERR(priv->regs_base);
goto out_netdev;
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index a9b105803fb7..73e4f2648e49 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -32,6 +32,7 @@ config FTGMAC100
depends on ARM || NDS32 || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select MDIO_ASPEED if MACH_ASPEED_G6
---help---
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 030fed65393e..9b7af94a40bb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
/* Map it */
map = skb_frag_dma_map(priv->dev, frag, 0, len,
@@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (priv->is_aspeed) {
- /* This driver supports the old MDIO interface */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ /* The AST2600 has a separate MDIO controller */
+
+ /* For the AST2400 and AST2500 this driver only supports the
+ * old MDIO interface
+ */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
@@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
+ of_device_is_compatible(np, "aspeed,ast2500-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
- } else {
+ } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ goto err_setup_mdio;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ } else if (np && !of_get_child_by_name(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that describe a
+ * MAC with an embedded MDIO controller but have no "mdio"
+ * child node. Automatically scan the MDIO bus for available
+ * PHYs.
+ */
priv->use_ncsi = false;
err = ftgmac100_setup_mdio(netdev);
if (err)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f38c3fa7d705..b4b82b9c5cd6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -485,7 +485,7 @@ static struct dpaa_bp *dpaa_bpid2pool(int bpid)
static bool dpaa_bpid2pool_use(int bpid)
{
if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
return true;
}
@@ -496,7 +496,7 @@ static bool dpaa_bpid2pool_use(int bpid)
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
}
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +584,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
if (!bp)
return;
- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
return;
if (bp->free_buf_cb)
@@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* populate the rest of SGT entries */
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
addr = skb_frag_dma_map(dev, frag, 0,
frag_len, dma_dir);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index af320f83c742..f7e59e8db075 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -32,6 +32,7 @@
#define __DPAA_H
#include <linux/netdevice.h>
+#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
@@ -99,7 +100,7 @@ struct dpaa_bp {
int (*seed_cb)(struct dpaa_bp *);
/* bpool can be emptied before freeing by this cb */
void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
- atomic_t refs;
+ refcount_t refs;
};
struct dpaa_rx_errors {
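
The dpaa_bp conversion above swaps atomic_t for refcount_t, which keeps the same call pattern but adds saturation and underflow checks. A minimal sketch of the mapping (illustrative structure and names):

#include <linux/refcount.h>

struct example_pool {
	refcount_t refs;
};

static void example_pool_init(struct example_pool *p)
{
	refcount_set(&p->refs, 1);		/* was atomic_set(, 1) */
}

static void example_pool_get(struct example_pool *p)
{
	refcount_inc(&p->refs);			/* was atomic_inc() */
}

static bool example_pool_put(struct example_pool *p)
{
	/* was atomic_dec_and_test(); true means the last reference dropped */
	return refcount_dec_and_test(&p->refs);
}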
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a027f4a9d0cc..a9afe46b837f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -164,70 +164,30 @@ static const struct file_operations dpaa2_dbg_ch_ops = {
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
- if (!dpaa2_dbg_root)
- return;
+ struct dentry *dir;
/* Create a directory for the interface */
- priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
- dpaa2_dbg_root);
- if (!priv->dbg.dir) {
- netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
- return;
- }
+ dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
/* per-cpu stats file */
- priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_cpu_ops);
- if (!priv->dbg.cpu_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_cpu_stats;
- }
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
/* per-fq stats file */
- priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_fq_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_fq_stats;
- }
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
/* per-fq stats file */
- priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_ch_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_ch_stats;
- }
-
- return;
-
-err_ch_stats:
- debugfs_remove(priv->dbg.fq_stats);
-err_fq_stats:
- debugfs_remove(priv->dbg.cpu_stats);
-err_cpu_stats:
- debugfs_remove(priv->dbg.dir);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
{
- debugfs_remove(priv->dbg.fq_stats);
- debugfs_remove(priv->dbg.ch_stats);
- debugfs_remove(priv->dbg.cpu_stats);
- debugfs_remove(priv->dbg.dir);
+ debugfs_remove_recursive(priv->dbg.dir);
}
void dpaa2_eth_dbg_init(void)
{
dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
- if (!dpaa2_dbg_root) {
- pr_err("DPAA2-ETH: debugfs create failed\n");
- return;
- }
-
pr_debug("DPAA2-ETH: debugfs created\n");
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
index 4f63de997a26..15598b28f03b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -11,9 +11,6 @@ struct dpaa2_eth_priv;
struct dpaa2_debugfs {
struct dentry *dir;
- struct dentry *fq_stats;
- struct dentry *ch_stats;
- struct dentry *cpu_stats;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0acb11557ed1..5402867272be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1208,9 +1208,37 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+{
+ struct dpni_taildrop td = {0};
+ int i, err;
+
+ if (priv->rx_td_enabled == enable)
+ return;
+
+ td.enable = enable;
+ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type != DPAA2_RX_FQ)
+ continue;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
+ priv->fq[i].flowid, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop() failed\n");
+ break;
+ }
+ }
+
+ priv->rx_td_enabled = enable;
+}
+
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
+ bool tx_pause;
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1220,11 +1248,18 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
return err;
}
+ /* If Tx pause frame settings have changed, we need to update
+ * Rx FQ taildrop configuration as well. We configure taildrop
+ * only when pause frame generation is disabled.
+ */
+ tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+ dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+
/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
- return 0;
+ goto out;
- priv->link_state = state;
if (state.up) {
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
@@ -1236,6 +1271,9 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
netdev_info(priv->net_dev, "Link Event: state %s\n",
state.up ? "up" : "down");
+out:
+ priv->link_state = state;
+
return 0;
}
@@ -2443,6 +2481,33 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq;
}
+static int set_pause(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_link_cfg link_cfg = {0};
+ int err;
+
+ /* Get the default link options so we don't override other flags */
+ err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_get_link_cfg() failed\n");
+ return err;
+ }
+
+ /* By default, enable both Rx and Tx pause frames */
+ link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
+
+ priv->link_state.options = link_cfg.options;
+
+ return 0;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2498,6 +2563,13 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
set_enqueue_mode(priv);
+ /* Enable pause frame support */
+ if (dpaa2_eth_has_pause_support(priv)) {
+ err = set_pause(priv);
+ if (err)
+ goto close;
+ }
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
@@ -2529,7 +2601,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
struct dpni_queue_id qid;
- struct dpni_taildrop td;
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
@@ -2554,15 +2625,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return err;
}
- td.enable = 1;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
- err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
- DPNI_QUEUE_RX, 0, fq->flowid, &td);
- if (err) {
- dev_err(dev, "dpni_set_threshold() failed\n");
- return err;
- }
-
/* xdp_rxq setup */
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
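
The taildrop/pause interaction added above, and the ethtool pauseparam hooks further down, all decode the same two DPNI link option bits: PAUSE gives the Rx pause state and ASYM_PAUSE flips the Tx direction relative to it, so Tx pause is the XOR of the two. Hypothetical helpers spelling out that decoding (DPNI_LINK_OPT_* come from dpni.h):

#include <linux/types.h>
#include "dpni.h"	/* DPNI_LINK_OPT_PAUSE, DPNI_LINK_OPT_ASYM_PAUSE */

/* PAUSE  ASYM_PAUSE  ->  rx_pause  tx_pause
 *   0        0             0         0
 *   0        1             0         1
 *   1        0             1         1
 *   1        1             1         0
 */
static bool example_rx_pause_enabled(u64 options)
{
	return !!(options & DPNI_LINK_OPT_PAUSE);
}

static bool example_tx_pause_enabled(u64 options)
{
	return !!(options & DPNI_LINK_OPT_PAUSE) ^
	       !!(options & DPNI_LINK_OPT_ASYM_PAUSE);
}

Rx taildrop is then enabled only while Tx pause is off, which is what priv->rx_td_enabled tracks in the hunk above.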
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9af18c24221f..8a0e65b3267f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -392,6 +392,7 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
+ u8 rx_td_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
@@ -476,6 +477,12 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PAUSE_VER_MAJOR 7
+#define DPNI_PAUSE_VER_MINOR 13
+#define dpaa2_eth_has_pause_support(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
+ DPNI_PAUSE_VER_MINOR) >= 0)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 7b182f4b263c..93076fe01b45 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -78,71 +78,67 @@ static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
- struct dpni_link_state state = {0};
- int err = 0;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
- if (err) {
- netdev_err(net_dev, "ERROR %d getting link state\n", err);
- goto out;
- }
-
- /* At the moment, we have no way of interrogating the DPMAC
- * from the DPNI side - and for that matter there may exist
- * no DPMAC at all. So for now we just don't report anything
- * beyond the DPNI attributes.
- */
- if (state.options & DPNI_LINK_OPT_AUTONEG)
- link_settings->base.autoneg = AUTONEG_ENABLE;
- if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.autoneg = AUTONEG_DISABLE;
+ if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
- link_settings->base.speed = state.rate;
+ link_settings->base.speed = priv->link_state.rate;
-out:
- return err;
+ return 0;
}
-#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
-#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
-static int
-dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
- const struct ethtool_link_ksettings *link_settings)
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
{
- struct dpni_link_cfg cfg = {0};
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err = 0;
+ u64 link_options = priv->link_state.options;
- /* If using an older MC version, the DPNI must be down
- * in order to be able to change link settings. Taking steps to let
- * the user know that.
- */
- if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
- DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
- if (netif_running(net_dev)) {
- netdev_info(net_dev, "Interface must be brought down first.\n");
- return -EACCES;
- }
+ pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
+ pause->tx_pause = pause->rx_pause ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+ pause->autoneg = AUTONEG_DISABLE;
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ if (!dpaa2_eth_has_pause_support(priv)) {
+ netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+ DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+ return -EOPNOTSUPP;
}
- cfg.rate = link_settings->base.speed;
- if (link_settings->base.autoneg == AUTONEG_ENABLE)
- cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ cfg.rate = priv->link_state.rate;
+ cfg.options = priv->link_state.options;
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
- if (link_settings->base.duplex == DUPLEX_HALF)
- cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ if (!!pause->rx_pause ^ !!pause->tx_pause)
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+ if (cfg.options == priv->link_state.options)
+ return 0;
err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
- if (err)
- /* ethtool will be loud enough if we return an error; no point
- * in putting our own error message on the console by default
- */
- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
- return err;
+ priv->link_state.options = cfg.options;
+
+ return 0;
}
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
@@ -721,7 +717,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
- .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
.get_strings = dpaa2_eth_get_strings,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 7b44d7d9b19a..d9b6918807af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -84,6 +84,7 @@
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -284,7 +285,7 @@ struct dpni_rsp_get_statistics {
__le64 counter[DPNI_STATISTICS_CNT];
};
-struct dpni_cmd_set_link_cfg {
+struct dpni_cmd_link_cfg {
/* cmd word 0 */
__le64 pad0;
/* cmd word 1 */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 220dfc806a24..05e30893dee6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -838,13 +838,13 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
const struct dpni_link_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_link_cfg *cmd_params;
+ struct dpni_cmd_link_cfg *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
cmd_flags,
token);
- cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
cmd_params->rate = cpu_to_le32(cfg->rate);
cmd_params->options = cpu_to_le64(cfg->options);
@@ -853,6 +853,42 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_get_link_cfg() - return the link configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->options = le64_to_cpu(rsp_params->options);
+
+ return err;
+}
+
+/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index a521242e2353..3e8fc6c7a8da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -485,6 +485,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
u16 token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 04a59db03f2b..c219587bd334 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -20,6 +20,15 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_MDIO
+ tristate "ENETC MDIO driver"
+ depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports the NXP ENETC Central MDIO controller as a PCIe
+ physical function (PF) device.
+
+ If compiled as module (M), the module name is fsl-enetc-mdio.
+
config FSL_ENETC_PTP_CLOCK
tristate "ENETC PTP clock driver"
depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 7139e414dccf..d200c27c3bf6 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,19 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \
- enetc_mdio.o
+fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
-fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
-fsl-enetc-vf-objs := enetc_vf.o
-else
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
- enetc_ethtool.o
-fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
-endif
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
-fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 223709443ea4..b6ff89307409 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
union enetc_tx_bd *txbd;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 77b9cd10ba2b..149883c8f0b8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -6,18 +6,20 @@
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_pf.h"
+#include "enetc_mdio.h"
-struct enetc_mdio_regs {
- u32 mdio_cfg; /* MDIO configuration and status */
- u32 mdio_ctl; /* MDIO control */
- u32 mdio_data; /* MDIO data */
- u32 mdio_addr; /* MDIO address */
-};
+#define ENETC_MDIO_REG_OFFSET 0x1c00
+#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
+#define ENETC_MDIO_CTL 0x4 /* MDIO control */
+#define ENETC_MDIO_DATA 0x8 /* MDIO data */
+#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv)
+#define enetc_mdio_rd(hw, off) \
+ enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
+#define enetc_mdio_wr(hw, off, val) \
+ enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDC_DIV 258
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
@@ -33,18 +35,18 @@ struct enetc_mdio_regs {
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs)
+static int enetc_mdio_wait_complete(struct enetc_hw *hw)
{
u32 val;
- return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val,
+ return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
!(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
}
-static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
@@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* write the value */
- enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value));
+ enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
return 0;
}
-static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
@@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* initiate the read */
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_rd_reg(&regs->mdio_data) & 0xffff;
+ value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
return value;
}
@@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int enetc_mdio_probe(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_regs __iomem *regs;
+ struct enetc_mdio_priv *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
- int ret;
+ int err;
- bus = mdiobus_alloc_size(sizeof(regs));
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf)
bus->read = enetc_mdio_read;
bus->write = enetc_mdio_write;
bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- /* store the enetc mdio base address for this bus */
- regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET;
- bus->priv = regs;
-
np = of_get_child_by_name(dev->of_node, "mdio");
if (!np) {
dev_err(dev, "MDIO node missing\n");
- ret = -EINVAL;
- goto err_registration;
+ return -EINVAL;
}
- ret = of_mdiobus_register(bus, np);
- if (ret) {
+ err = of_mdiobus_register(bus, np);
+ if (err) {
of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return err;
}
of_node_put(np);
pf->mdio = bus;
return 0;
-
-err_registration:
- mdiobus_free(bus);
-
- return ret;
}
void enetc_mdio_remove(struct enetc_pf *pf)
{
- if (pf->mdio) {
+ if (pf->mdio)
mdiobus_unregister(pf->mdio);
- mdiobus_free(pf->mdio);
- }
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
new file mode 100644
index 000000000000..60c9a3889824
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#include <linux/phy.h>
+#include "enetc_pf.h"
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+};
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 000000000000..fbd41ce01f06
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/of_mdio.h>
+#include "enetc_mdio.h"
+
+#define ENETC_MDIO_DEV_ID 0xee01
+#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = &pdev->dev;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int err;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = ENETC_MDIO_BUS_NAME;
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "device enable failed\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_region failed\n");
+ goto err_pci_mem_reg;
+ }
+
+ hw->port = pci_iomap(pdev, 0, 0);
+ if (!hw->port) {
+ err = -ENXIO;
+ dev_err(dev, "iomap failed\n");
+ goto err_ioremap;
+ }
+
+ err = of_mdiobus_register(bus, dev->of_node);
+ if (err)
+ goto err_mdiobus_reg;
+
+ pci_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_mdiobus_reg:
+ iounmap(mdio_priv->hw->port);
+err_ioremap:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+ struct mii_bus *bus = pci_get_drvdata(pdev);
+ struct enetc_mdio_priv *mdio_priv;
+
+ mdiobus_unregister(bus);
+ mdio_priv = bus->priv;
+ iounmap(mdio_priv->hw->port);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pci_mdio_id_table,
+ .probe = enetc_pci_mdio_probe,
+ .remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 258b3cb38a6f..7d6513ff8507 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct device_node *np = priv->dev->of_node;
+ struct device_node *mdio_np;
int err;
if (!np) {
@@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
priv->phy_node = of_node_get(np);
}
- if (!of_phy_is_fixed_link(np)) {
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
err = enetc_mdio_probe(pf);
if (err) {
of_node_put(priv->phy_node);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e5610a4da539..d4d4c72adf49 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -208,8 +208,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_ST_C45 (0)
#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_READ_C45 (3 << 28)
#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE (0)
#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
@@ -365,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
+ frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
/* Handle the last BD specially */
if (frag == nr_frags - 1) {
@@ -387,7 +390,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ bufaddr = skb_frag_address(this_frag);
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -1767,7 +1770,8 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret = 0, frame_start, frame_addr, frame_op;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1775,9 +1779,37 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ frame_op = FEC_MMFR_OP_READ_C45;
+
+ } else {
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
@@ -1804,7 +1836,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret;
+ int ret, frame_start, frame_addr;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1814,9 +1847,33 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a write op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -1828,6 +1885,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
ret = -ETIMEDOUT;
}
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -3338,7 +3396,6 @@ fec_probe(struct platform_device *pdev)
struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
- struct resource *r;
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
@@ -3378,8 +3435,7 @@ fec_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ fep->hwp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
goto failed_ioremap;
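
For clause-45 transactions the FEC now issues two MMFR frames: an address frame (ST=00, OP=00) carrying the 16-bit register number in the data field, followed by the actual read or write whose RA field holds the MMD device address taken from bits 20:16 of regnum. The snippet below is a hedged illustration of how a caller encodes such a regnum with the generic definitions from linux/mdio.h; the PMA/PMD MMD and register picked here are arbitrary examples, not something this patch requires:

	#include <linux/mdio.h>
	#include <linux/phy.h>

	static int example_read_pma_ctrl(struct mii_bus *bus, int phy_addr)
	{
		/* MII_ADDR_C45 flags a clause-45 access; the device address
		 * (MMD) lives in bits 20:16 and the register in bits 15:0,
		 * which is exactly what fec_enet_mdio_read() unpacks above.
		 */
		u32 regnum = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1;

		return mdiobus_read(bus, phy_addr, regnum);
	}
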
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5fad73b2e123..3981c06f082f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
for (i = 0; i < nr_frags; i++, frag++) {
- if (!IS_ALIGNED(frag->page_offset, 4)) {
+ if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
is_aligned = 0;
break;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7ea19e173339..412c0340fed9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2005,8 +2005,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
- if (rx_queue->skb)
- dev_kfree_skb(rx_queue->skb);
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 689f18e3100f..90ab7ade44c4 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq <= 0) {
- dev_err(dev, "No irq resource\n");
ret = -ENODEV;
goto out_disconnect_phy;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 349970557c52..95a6b0926170 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ int len = skb_frag_size(frag);
addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
ret = dma_mapping_error(priv->dev, addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2235dd55fab2..a48396dd4ebb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso(
int frag_num;
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
size = skb_headlen(skb);
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int buf_num;
int seg_num;
dma_addr_t dma;
@@ -1182,6 +1182,8 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
+ phy_attached_info(phy_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 75329ab775a6..f8a87f8ca983 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
};
/* below are per-VF mac-vlan subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 908d4f45c06a..528f6243cdc6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -46,7 +46,7 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+ struct hnae3_ae_dev *ae_dev)
{
int inited = 0;
@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- int ret = 0;
if (!client)
return -ENODEV;
@@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
- ret = hnae3_init_client_instance(client, ae_dev);
+ int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -164,7 +163,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_algo)
return;
@@ -258,7 +257,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_dev)
return -ENODEV;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 48c7b70fc2c4..c4b7bf851a28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -58,10 +58,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
@@ -85,13 +85,18 @@ struct hnae3_queue {
void __iomem *io_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
- int tqp_index; /* index in a handle */
- u32 buf_size; /* size for hnae_desc->addr, preset by AE */
- u16 tx_desc_num;/* total number of tx desc */
- u16 rx_desc_num;/* total number of rx desc */
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
};
-/*hnae3 loop mode*/
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
enum hnae3_loop {
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
@@ -141,6 +146,12 @@ enum hnae3_reset_notify_type {
HNAE3_RESTORE_CLIENT,
};
+enum hnae3_hw_error_type {
+ HNAE3_PPU_POISON_ERROR,
+ HNAE3_CMDQ_ECC_ERROR,
+ HNAE3_IMP_RD_POISON_ERROR,
+};
+
enum hnae3_reset_type {
HNAE3_VF_RESET,
HNAE3_VF_FUNC_RESET,
@@ -179,6 +190,15 @@ struct hnae3_vector_info {
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -196,7 +216,8 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
- enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
+ void (*process_hw_error)(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -289,6 +310,8 @@ struct hnae3_ae_dev {
* Remove multicast address from mac table
* update_stats()
* Update Old network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
* get_ethtool_stats()
* Get ethtool network device statistics
* get_strings()
@@ -417,8 +440,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
- void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -605,7 +628,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
- u64 flags; /* Indicate the capabilities for this handle*/
+ u64 flags; /* Indicate the capabilities for this handle */
union {
struct net_device *netdev; /* first member */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index a4b937286f55..5cf4c1ecc5b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -8,6 +8,7 @@
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -38,7 +39,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
if (queue_num >= h->kinfo.num_tqps) {
dev_err(&h->pdev->dev,
- "Queue number(%u) is out of range(%u)\n", queue_num,
+ "Queue number(%u) is out of range(0-%u)\n", queue_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -176,7 +177,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
}
if (q_num >= h->kinfo.num_tqps) {
- dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
+ dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -187,14 +188,14 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
tx_index = (cnt == 1) ? value : tx_index;
if (tx_index >= ring->desc_num) {
- dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
+ dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
ring->desc_num - 1);
return -EINVAL;
}
tx_desc = &ring->desc[tx_index];
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
- dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
+ dev_info(dev, "(TX)addr: 0x%llx\n", tx_desc->addr);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
@@ -218,6 +219,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
+ dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
@@ -237,16 +239,16 @@ static void hns3_dbg_help(struct hnae3_handle *h)
char printf_buf[HNS3_DBG_BUF_LEN];
dev_info(&h->pdev->dev, "available commands\n");
- dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
- dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+ dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
if (!hns3_is_phys_func(h->pdev))
return;
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
- dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+ dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
dev_info(&h->pdev->dev, "dump tm\n");
dev_info(&h->pdev->dev, "dump qos pause cfg\n");
dev_info(&h->pdev->dev, "dump qos pri map\n");
@@ -258,20 +260,20 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+ strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
HNS3_DBG_BUF_LEN - 1);
strncat(printf_buf + strlen(printf_buf),
- " [igu egu <prt_id>] [rpu <tc_queue_num>]",
+ " [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
+ strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
HNS3_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
+ strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
}
@@ -322,6 +324,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
+ if (count > HNS3_DBG_WRITE_LEN)
+ return -ENOSPC;
+
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
@@ -372,20 +377,11 @@ static const struct file_operations hns3_dbg_cmd_fops = {
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
- struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
- if (!handle->hnae3_dbgfs)
- return;
- pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
- &hns3_dbg_cmd_fops);
- if (!pfile) {
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
- dev_warn(&handle->pdev->dev, "create file for %s fail\n",
- name);
- }
+ debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
@@ -397,10 +393,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!hns3_dbgfs_root) {
- pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
- return;
- }
}
void hns3_dbg_unregister_debugfs(void)
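
The dropped NULL checks follow the debugfs convention that creation failures are not fatal: debugfs_create_dir()/debugfs_create_file() return an error pointer when they fail, and later debugfs calls accept that value as a parent and quietly do nothing, so callers are expected to ignore the result. A minimal, hypothetical module snippet showing the intended pattern (names here are illustrative, not driver code):

	#include <linux/debugfs.h>

	static struct dentry *example_root;
	static u32 example_counter;

	static void example_debugfs_init(void)
	{
		/* no error checking on purpose: on failure example_root holds
		 * an error pointer and the call below becomes a no-op
		 */
		example_root = debugfs_create_dir("example", NULL);
		debugfs_create_u32("counter", 0444, example_root, &example_counter);
	}

	static void example_debugfs_exit(void)
	{
		debugfs_remove_recursive(example_root);
	}
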
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 310afa708831..9f3f8e3cb2c2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -28,6 +28,12 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
@@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -220,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
*/
-
- /* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
@@ -459,6 +468,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
return 0;
}
@@ -519,6 +531,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return 0;
+ netif_dbg(h, drv, netdev, "net stop\n");
+
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
@@ -956,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
-static int hns3_fill_desc_vtags(struct sk_buff *skb,
- struct hns3_enet_ring *tx_ring,
- u32 *inner_vlan_flag,
- u32 *out_vlan_flag,
- u16 *inner_vtag,
- u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
{
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
/* Since HW limitation, if port based insert VLAN enabled, only one VLAN
* header is allowed in skb, otherwise it will cause RAS error.
@@ -976,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
return -EINVAL;
if (skb->protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->tqp->handle->kinfo.netdev->features &
- NETIF_F_HW_VLAN_CTAG_TX)) {
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
/* When HW VLAN acceleration is turned off, and the stack
* sets the protocol to 802.1q, the driver just need to
* set the protocol to the encapsulated ethertype.
@@ -987,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag;
-
- vlan_tag = skb_vlan_tag_get(skb);
- vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
/* Based on hw strategy, use out_vtag in two layer tag case,
* and use inner_vtag in one tag case.
*/
- if (skb->protocol == htons(ETH_P_8021Q)) {
- if (handle->port_base_vlan_state ==
- HNAE3_PORT_BASE_VLAN_DISABLE){
- hns3_set_field(*out_vlan_flag,
- HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
- } else {
- hns3_set_field(*inner_vlan_flag,
- HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else {
- hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *vhdr;
- int rc;
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
- rc = skb_cow_head(skb, 0);
- if (unlikely(rc < 0))
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
- << HNS3_TX_VLAN_PRIO_SHIFT);
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
}
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
skb->protocol = vlan_get_protocol(skb);
return 0;
}
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc)
+{
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_vlan_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ inner_vtag = skb_vlan_tag_get(skb);
+ inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ out_vtag = skb_vlan_tag_get(skb);
+ out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l4_proto_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l2l3l4_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &paylen, &mss,
+ &type_cs_vlan_tso);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_tso_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.paylen = cpu_to_le32(paylen);
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+ return 0;
+}
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int size, int frag_end,
enum hns_desc_type type)
@@ -1033,65 +1108,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
dma_addr_t dma;
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
- u32 ol_type_vlan_len_msec = 0;
- u32 type_cs_vlan_tso = 0;
- u32 paylen = skb->len;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
- u16 mss = 0;
int ret;
- ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
- &ol_type_vlan_len_msec,
- &inner_vtag, &out_vtag);
+ ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret))
return ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- skb_reset_mac_len(skb);
-
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_tso(skb, &paylen, &mss,
- &type_cs_vlan_tso);
- if (unlikely(ret))
- return ret;
- }
-
- /* Set txbd */
- desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le32(paylen);
- desc->tx.mss = cpu_to_le16(mss);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
- frag = (struct skb_frag_struct *)priv;
+ frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
@@ -1147,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
{
- int size = skb_headlen(skb);
- int i, bd_num;
+ unsigned int bd_num;
+ int i;
/* if the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE))
return skb_shinfo(skb)->nr_frags + 1;
- bd_num = hns3_tx_bd_count(size);
+ bd_num = hns3_tx_bd_count(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- int frag_bd_num;
-
- size = skb_frag_size(frag);
- frag_bd_num = hns3_tx_bd_count(size);
-
- if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
- return -ENOMEM;
-
- bd_num += frag_bd_num;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bd_num += hns3_tx_bd_count(skb_frag_size(frag));
}
return bd_num;
@@ -1189,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb)
{
- int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
@@ -1219,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- int bd_num;
+ unsigned int bd_num;
bd_num = hns3_nic_bd_num(skb);
- if (bd_num < 0)
- return bd_num;
-
- if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
+ !hns3_skb_need_linearized(skb))
goto out;
- bd_num = hns3_tx_bd_count(skb->len);
- if (unlikely(ring_space(ring) < bd_num))
- return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
@@ -1241,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+ bd_num = hns3_nic_bd_num(new_skb);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
+ (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ return -ENOMEM;
+
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
u64_stats_update_end(&ring->syncp);
@@ -1290,7 +1321,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int next_to_use_head;
int buf_num;
int seg_num;
@@ -1314,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&ring->syncp);
}
- if (net_ratelimit())
- netdev_err(netdev, "xmit error: %d!\n", buf_num);
-
+ hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
}
@@ -1482,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
tx_drop += ring->stats.sw_err_cnt;
+ tx_drop += ring->stats.tx_vlan_err;
+ tx_drop += ring->stats.tx_l4_proto_err;
+ tx_drop += ring->stats.tx_l2l3l4_err;
+ tx_drop += ring->stats.tx_tso_err;
tx_errors += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.tx_vlan_err;
+ tx_errors += ring->stats.tx_l4_proto_err;
+ tx_errors += ring->stats.tx_l2l3l4_err;
+ tx_errors += ring->stats.tx_tso_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1550,6 +1587,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
h = hns3_get_handle(netdev);
kinfo = &h->kinfo;
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1593,6 +1632,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
+ vf, vlan, qos, vlan_proto);
+
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
qos, vlan_proto);
@@ -1611,6 +1654,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1680,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
/* When mac received many pause frames continuous, it's unable to send
* packets, which may cause tx timeout
*/
- if (h->ae_algo->ops->update_stats &&
- h->ae_algo->ops->get_mac_pause_stats) {
- u64 tx_pause_cnt, rx_pause_cnt;
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
- h->ae_algo->ops->update_stats(h, &ndev->stats);
- h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
- &rx_pause_cnt);
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
- tx_pause_cnt, rx_pause_cnt);
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
hw_head = readl_relaxed(tx_ring->tqp->io_base +
@@ -1963,7 +2006,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
ops = ae_dev->ops;
/* request the reset */
- if (ops->reset_event) {
+ if (ops->reset_event && ops->get_reset_level) {
if (ae_dev->hw_err_reset_req) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
@@ -2067,7 +2110,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae3_page_order(ring);
+ unsigned int order = hns3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -2078,7 +2121,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae3_page_size(ring);
+ cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -2357,8 +2400,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "hnae reserve buffer map failed.\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx buffer failed: %d\n",
+ ret);
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2381,7 +2425,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
int size = le16_to_cpu(desc->rx.size);
- u32 truesize = hnae3_buf_size(ring);
+ u32 truesize = hns3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2396,7 +2440,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
@@ -2443,9 +2487,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
&iph->daddr, 0);
} else {
- netdev_err(skb->dev,
- "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
- be16_to_cpu(type), depth);
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
return -EFAULT;
}
@@ -2587,7 +2631,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb = ring->skb;
if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
@@ -2662,8 +2706,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!new_skb)) {
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "alloc rx skb frag fail\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx fraglist skb fail\n");
return -ENXIO;
}
ring->frag_num = 0;
@@ -2678,7 +2722,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
if (ring->tail_skb) {
- head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->truesize += hns3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
@@ -2895,24 +2939,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
- int num;
+ int recv_pkts = 0;
+ int recv_bds = 0;
+ int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
- recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
- if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
- clean_count = 0;
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
}
@@ -2926,7 +2968,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
goto out;
} else if (unlikely(err)) { /* Do jump the err */
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
continue;
@@ -2934,7 +2976,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -2943,8 +2985,8 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
out:
/* Make all data has been write before submit */
- if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
+ if (unused_count > 0)
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
return recv_pkts;
}
@@ -3574,7 +3616,7 @@ out:
return ret;
}
-static void hns3_fini_ring(struct hns3_enet_ring *ring)
+void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
devm_kfree(ring_to_dev(ring), ring->desc_cb);
@@ -4165,8 +4207,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only support setting and querying one coal
- * configuation for now, so save the vector 0' coal
- * configuation here in order to restore it.
+ * configuration for now, so save the vector 0' coal
+ * configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
@@ -4378,6 +4420,9 @@ int hns3_set_channels(struct net_device *netdev,
u16 org_tqp_num;
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (ch->rx_count || ch->tx_count)
return -EINVAL;
@@ -4392,6 +4437,10 @@ int hns3_set_channels(struct net_device *netdev,
if (kinfo->rss_size == new_tqp_num)
return 0;
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
@@ -4421,12 +4470,36 @@ int hns3_set_channels(struct net_device *netdev,
return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
+static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+ { .type = HNAE3_CMDQ_ECC_ERROR,
+ .msg = "IMP CMDQ error" },
+ { .type = HNAE3_IMP_RD_POISON_ERROR,
+ .msg = "IMP RD poison" },
+};
+
+static void hns3_process_hw_error(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
+ if (hns3_hw_err[i].type == type) {
+ dev_err(&handle->pdev->dev, "Detected %s!\n",
+ hns3_hw_err[i].msg);
+ break;
+ }
+ }
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
.setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
+ .process_hw_error = hns3_process_hw_error,
};
/* hns3_init_module - Driver registration routine
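
hns3_handle_vtags() now folds skb->priority into the VLAN TCI with the generic VLAN_PRIO_SHIFT/VLAN_PRIO_MASK definitions instead of the driver-private HNS3_TX_VLAN_PRIO_SHIFT it removes. A small, self-contained sketch of that TCI construction; the helper name is made up for illustration:

	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>

	static u16 example_build_tx_tci(const struct sk_buff *skb)
	{
		u16 tci = skb_vlan_tag_get(skb);	/* VID from the offloaded tag */

		/* place the 3-bit skb priority into the PCP field (bits 15:13) */
		tci |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;

		return tci;
	}
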
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 848b866761df..2110fa3b4479 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -75,7 +75,7 @@ enum hns3_nic_state {
#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
-#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
@@ -195,7 +195,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_PER_FRAG 8
+#define HNS3_MAX_BD_NUM_NORMAL 8
+#define HNS3_MAX_BD_NUM_TSO 63
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
#define HNS3_VECTOR_GL0_OFFSET 0x100
@@ -301,7 +302,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
- /* priv data for the desc, e.g. skb when use with ip stack*/
+ /* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
@@ -324,11 +325,11 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_MAC_PAUSE,
HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/
- /* reserved for 0xA~0xB*/
+ /* reserved for 0xA~0xB */
HNS3_L3_TYPE_CNM = 0xc,
- /* reserved for 0xD~0xE*/
+ /* reserved for 0xD~0xE */
HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -353,7 +354,7 @@ enum hns3_pkt_ol3type {
HNS3_OL3_TYPE_IPV4_OPT = 4,
HNS3_OL3_TYPE_IPV6_EXT,
- /* reserved for 0x6~0xE*/
+ /* reserved for 0x6~0xE */
HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -377,6 +378,10 @@ struct ring_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
};
struct {
u64 rx_pkts;
@@ -547,6 +552,11 @@ union l4_hdr_info {
unsigned char *hdr;
};
+struct hns3_hw_error_info {
+ enum hnae3_hw_error_type type;
+ const char *msg;
+};
+
static inline int ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
@@ -608,9 +618,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
@@ -633,6 +652,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
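
hns3_page_order() replaces the old hnae3_page_order() macro with an explicit rule: on systems whose pages are smaller than 8 KB, bump the allocation to order 1 as soon as one rx buffer would take more than half a page, so two buffers can still share one allocation and the desc_cb->page_offset based reuse keeps working. A user-space mirror of the same arithmetic, purely for illustration and assuming 4 KB pages:

	#include <stdio.h>

	#define EXAMPLE_PAGE_SIZE 4096u	/* assume 4 KB kernel pages */

	static unsigned int example_page_order(unsigned int buf_size)
	{
		/* same rule as hns3_page_order(): order 1 once a single buffer
		 * needs more than half a page, otherwise order 0
		 */
		return buf_size > EXAMPLE_PAGE_SIZE / 2 ? 1 : 0;
	}

	int main(void)
	{
		printf("buf 2048 -> order %u (two buffers per 4 KB page)\n",
		       example_page_order(2048));
		printf("buf 4096 -> order %u (one 8 KB allocation, two buffers)\n",
		       example_page_order(4096));
		return 0;
	}
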
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 5bff98a9b0dc..c52eccc1621d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -55,7 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 3
+#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -85,6 +89,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -92,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break;
}
- if (ret)
+ if (ret || h->pdev->revision >= 0x21)
return ret;
if (en) {
@@ -139,7 +144,10 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
static void hns3_lp_setup_skb(struct sk_buff *skb)
{
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
unsigned char *packet;
struct ethhdr *ethh;
unsigned int i;
@@ -155,7 +163,9 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* before the packet reaches mac or serdes, which will defect
* the purpose of mac or serdes selftest.
*/
- ethh->h_dest[5] += 0x1f;
+ handle = hns3_get_handle(ndev);
+ if (handle->pdev->revision == 0x20)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
@@ -311,6 +321,8 @@ static void hns3_self_test(struct net_device *ndev,
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
+ netif_dbg(h, drv, ndev, "self test start\n");
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -324,6 +336,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -374,6 +390,8 @@ static void hns3_self_test(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
+
+ netif_dbg(h, drv, ndev, "self test end\n");
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -527,6 +545,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
@@ -545,8 +564,18 @@ static void hns3_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->bus_info));
drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
}
static u32 hns3_get_link(struct net_device *netdev)
@@ -593,6 +622,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
if (h->ae_algo->ops->set_pauseparam)
return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
param->rx_pause,
@@ -612,7 +645,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
- /* 2.get link mode*/
+ /* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
@@ -681,7 +714,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int hns3_check_ksettings_param(struct net_device *netdev,
+static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -726,12 +759,17 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- int ret = 0;
+ int ret;
/* Chip don't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
@@ -843,8 +881,8 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 tx_desc_num, u32 rx_desc_num)
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -857,21 +895,29 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
rx_desc_num;
}
-
- return hns3_init_all_ring(priv);
}
-static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- int queue_num = h->kinfo.num_tqps;
- int ret;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++)
+ memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ sizeof(struct hns3_enet_ring));
+
+ return tmp_rings;
+}
+
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
if (hns3_nic_resetting(ndev))
return -EBUSY;
@@ -887,6 +933,25 @@ static int hns3_set_ringparam(struct net_device *ndev,
return -EINVAL;
}
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param);
+ if (ret)
+ return ret;
+
/* Hardware requires that its descriptors must be multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
@@ -896,6 +961,13 @@ static int hns3_set_ringparam(struct net_device *ndev,
old_rx_desc_num == new_rx_desc_num)
return 0;
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+ "backup ring param failed by allocating memory fail\n");
+ return -ENOMEM;
+ }
+
netdev_info(ndev,
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
old_tx_desc_num, old_rx_desc_num,
@@ -904,22 +976,24 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- return ret;
-
- ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
- new_rx_desc_num);
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
- if (ret) {
- netdev_err(ndev,
- "Revert to old bd num fail, ret=%d.\n", ret);
- return ret;
- }
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
}
+ kfree(tmp_rings);
+
if (if_running)
ret = ndev->netdev_ops->ndo_open(ndev);
@@ -973,6 +1047,9 @@ static int hns3_nway_reset(struct net_device *netdev)
return -EINVAL;
}
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
if (phy)
return genphy_restart_aneg(phy);
@@ -1297,6 +1374,9 @@ static int hns3_set_fecparam(struct net_device *netdev,
if (!ops->set_fec)
return -EOPNOTSUPP;
fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
return ops->set_fec(handle, fec_mode);
}
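
The hns3_set_ringparam() rework above follows a backup-modify-rollback pattern: snapshot the current per-ring state, apply the new descriptor counts, and restore the snapshot if re-initialisation fails. Below is a minimal user-space sketch of that flow; the ring struct, the init_rings() stub and its failure condition are placeholders, not driver code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ring {
        unsigned int desc_num;    /* per-ring descriptor count */
    };

    /* stub for hns3_init_all_ring(); pretend a depth of 0 is rejected */
    static int init_rings(struct ring *rings, int n)
    {
        for (int i = 0; i < n; i++)
            if (rings[i].desc_num == 0)
                return -1;
        return 0;
    }

    static int set_ring_depth(struct ring *rings, int n, unsigned int depth)
    {
        struct ring *backup;
        int ret;

        /* 1. back up the current configuration */
        backup = malloc(n * sizeof(*backup));
        if (!backup)
            return -1;
        memcpy(backup, rings, n * sizeof(*backup));

        /* 2. apply the new descriptor count to every ring */
        for (int i = 0; i < n; i++)
            rings[i].desc_num = depth;

        /* 3. on failure, restore the rings from the backup copy */
        ret = init_rings(rings, n);
        if (ret)
            memcpy(rings, backup, n * sizeof(*backup));

        free(backup);
        return ret;
    }

    int main(void)
    {
        struct ring rings[4] = { { 1024 }, { 1024 }, { 1024 }, { 1024 } };

        if (set_ring_depth(rings, 4, 0))
            printf("failed, depth restored to %u\n", rings[0].desc_num);
        return 0;
    }
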
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 22f6acd45d9a..ecf58cfd253d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
dma_addr_t dma = ring->desc_dma_addr;
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
+ u32 reg_val;
if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -383,6 +386,23 @@ err_csq:
return ret;
}
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+ struct hclge_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ req->compat = cpu_to_le32(compat);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
int hclge_cmd_init(struct hclge_dev *hdev)
{
u32 version;
@@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ /* ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclge_firmware_compat_config(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
return 0;
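
The firmware-version print above decodes the packed 32-bit register into dotted form with hnae3_get_field(). The exact HNAE3_FW_VERSION_BYTE*_MASK/SHIFT values come from hnae3.h and are not part of this hunk; the standalone sketch below assumes the usual one-byte-per-field layout.

    #include <stdio.h>
    #include <stdint.h>

    /* assumed one-byte fields; the real masks/shifts live in hnae3.h */
    #define FW_VERSION_BYTE3_MASK  0xff000000U
    #define FW_VERSION_BYTE3_SHIFT 24
    #define FW_VERSION_BYTE2_MASK  0x00ff0000U
    #define FW_VERSION_BYTE2_SHIFT 16
    #define FW_VERSION_BYTE1_MASK  0x0000ff00U
    #define FW_VERSION_BYTE1_SHIFT 8
    #define FW_VERSION_BYTE0_MASK  0x000000ffU
    #define FW_VERSION_BYTE0_SHIFT 0

    static uint32_t get_field(uint32_t value, uint32_t mask, unsigned int shift)
    {
        return (value & mask) >> shift;
    }

    int main(void)
    {
        uint32_t version = 0x01080A03;    /* example raw register value */

        printf("The firmware version is %u.%u.%u.%u\n",
               get_field(version, FW_VERSION_BYTE3_MASK, FW_VERSION_BYTE3_SHIFT),
               get_field(version, FW_VERSION_BYTE2_MASK, FW_VERSION_BYTE2_SHIFT),
               get_field(version, FW_VERSION_BYTE1_MASK, FW_VERSION_BYTE1_SHIFT),
               get_field(version, FW_VERSION_BYTE0_MASK, FW_VERSION_BYTE0_SHIFT));
        return 0;
    }
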
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96840d8f3e24..4821fe08b5e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,6 +86,8 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
@@ -221,6 +223,9 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
@@ -257,6 +262,7 @@ enum hclge_opcode_type {
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
+ HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -586,6 +592,12 @@ struct hclge_config_mac_mode_cmd {
u8 rsv[20];
};
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
#define HCLGE_CFG_SPEED_S 0
#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
@@ -762,6 +774,31 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
+#define HCLGE_SWITCH_ALW_LPBK_B 1U
+#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
+#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U
+#define HCLGE_SWITCH_NO_MASK 0x0
+#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE
+#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD
+#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB
+#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7
+
+struct hclge_mac_vlan_switch_cmd {
+ u8 roce_sel;
+ u8 rsv1[3];
+ __le32 func_id;
+ u8 switch_param;
+ u8 rsv2[3];
+ u8 param_mask;
+ u8 rsv3[11];
+};
+
+enum hclge_mac_vlan_cfg_sel {
+ HCLGE_MAC_VLAN_NIC_SEL = 0,
+ HCLGE_MAC_VLAN_ROCE_SEL,
+};
+
#define HCLGE_ACCEPT_TAG1_B 0
#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
@@ -827,7 +864,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_add[6];
+ u8 mac_addr[6];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -877,6 +914,13 @@ struct hclge_reset_cmd {
u8 rsv[22];
};
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
@@ -906,8 +950,11 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGE_NIC_CMQ_EN_B 16
-#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_NIC_SW_RST_RDY_B 16
+#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
+
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
@@ -1009,6 +1056,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
+#define HCLGE_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+struct hclge_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
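
HCLGE_NIC_SW_RST_RDY is defined above as bit 16 of the CSQ depth register, so both the command-queue setup in hclge_cmd.c and the reset handshake added later in hclge_main.c must read-modify-write that register instead of overwriting it. A small sketch of the two accesses, with the hardware register modelled as a plain variable:

    #include <stdio.h>
    #include <stdint.h>

    #define NIC_SW_RST_RDY_B   16
    #define NIC_SW_RST_RDY     (1U << NIC_SW_RST_RDY_B)
    #define NIC_CMQ_DESC_NUM_S 3

    static uint32_t csq_depth_reg;    /* stand-in for the hardware register */

    static void config_csq_depth(uint32_t desc_num)
    {
        uint32_t val = csq_depth_reg;

        val &= NIC_SW_RST_RDY;                 /* keep only the ready bit */
        val |= desc_num >> NIC_CMQ_DESC_NUM_S; /* program the depth field */
        csq_depth_reg = val;
    }

    static void reset_handshake(int enable)
    {
        uint32_t val = csq_depth_reg;

        if (enable)
            val |= NIC_SW_RST_RDY;
        else
            val &= ~NIC_SW_RST_RDY;
        csq_depth_reg = val;
    }

    int main(void)
    {
        reset_handshake(1);
        config_csq_depth(1024);
        printf("reg = 0x%x (ready bit %s)\n", csq_depth_reg,
               (csq_depth_reg & NIC_SW_RST_RDY) ? "kept" : "lost");
        return 0;
    }
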
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index bac4ce13f6ae..816f92084138 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -198,9 +198,32 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev)
return 0;
}
+static int hclge_notify_down_uinit(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+}
+
+static int hclge_notify_init_up(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
bool map_changed = false;
u8 num_tc = 0;
@@ -215,11 +238,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
return ret;
if (map_changed) {
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
+ netif_dbg(h, drv, netdev, "set ets\n");
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
}
@@ -239,11 +260,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
}
@@ -254,10 +271,8 @@ err_out:
if (!map_changed)
return ret;
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
@@ -300,6 +315,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
@@ -325,6 +341,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
hclge_tm_pfc_info_update(hdev);
return hclge_pause_setup_hw(hdev, false);
@@ -345,8 +365,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
/* No support for LLD_MANAGED modes or CEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
@@ -372,11 +395,7 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (ret)
return -EINVAL;
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
@@ -398,17 +417,11 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
else
hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return hclge_notify_init_up(hdev);
err_out:
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
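
hclge_notify_down_uinit() and hclge_notify_init_up() simply pair the client notifications that the DCB paths used to issue inline, stopping at the first failure. A tiny user-space model of that pairing, with notify() standing in for hclge_notify_client():

    #include <stdio.h>

    enum stage { STAGE_DOWN, STAGE_UNINIT, STAGE_INIT, STAGE_UP };

    /* stub for hclge_notify_client(); always succeeds here */
    static int notify(enum stage s)
    {
        printf("notify stage %d\n", s);
        return 0;
    }

    static int notify_down_uninit(void)
    {
        int ret = notify(STAGE_DOWN);

        return ret ? ret : notify(STAGE_UNINIT);
    }

    static int notify_init_up(void)
    {
        int ret = notify(STAGE_INIT);

        return ret ? ret : notify(STAGE_UP);
    }

    int main(void)
    {
        if (notify_down_uninit())
            return 1;

        /* ... reconfigure ETS/TC state here ... */

        return notify_init_up() ? 1 : 0;
    }
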
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ab625c757a95..1c6b501fb7ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -4,32 +4,92 @@
#include <linux/device.h>
#include "hclge_debugfs.h"
-#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
+static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .reg_type = "bios common",
+ .dfx_msg = &hclge_dbg_bios_common_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+ .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+ .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+ .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+ .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+ .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+ { .reg_type = "igu egu",
+ .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+ .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+ .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+ .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+ { .reg_type = "ncsi",
+ .dfx_msg = &hclge_dbg_ncsi_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+ .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+ .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+ { .reg_type = "rtc",
+ .dfx_msg = &hclge_dbg_rtc_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+ .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RTC_REG } },
+ { .reg_type = "ppp",
+ .dfx_msg = &hclge_dbg_ppp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+ .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_PPP_REG } },
+ { .reg_type = "rcb",
+ .dfx_msg = &hclge_dbg_rcb_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+ .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RCB_REG } },
+ { .reg_type = "tqp",
+ .dfx_msg = &hclge_dbg_tqp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+ .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
- struct hclge_desc desc[4];
- int ret;
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int entries_per_desc;
+ int index;
+ int ret;
- ret = hclge_cmd_send(&hdev->hw, desc, 4);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
dev_err(&hdev->pdev->dev,
- "get dfx bdnum fail, status is %d.\n", ret);
+ "get dfx bdnum fail, ret = %d\n", ret);
return ret;
}
- return (int)desc[offset / 6].data[offset % 6];
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ index = offset % entries_per_desc;
+ return (int)desc[offset / entries_per_desc].data[index];
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
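
hclge_dbg_get_dfx_bd_num() above drops the hard-coded 6 in favour of ARRAY_SIZE(desc[0].data) and picks the right descriptor and data word with a divide/modulo pair. A one-line standalone check of that arithmetic, assuming 6 words per descriptor as the old constant implied:

    #include <stdio.h>

    int main(void)
    {
        const int entries_per_desc = 6;    /* ARRAY_SIZE(desc[0].data) */
        int offset = 7;

        /* offset 7 -> descriptor 1, data word 1 */
        printf("offset %d -> desc %d, data[%d]\n",
               offset, offset / entries_per_desc, offset % entries_per_desc);
        return 0;
    }
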
@@ -50,35 +110,40 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "read reg cmd send fail, status is %d.\n", ret);
- return ret;
- }
-
+ "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
return ret;
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_dfx_message *dfx_message,
- const char *cmd_buf, int msg_num,
- int offset, enum hclge_opcode_type cmd)
+ struct hclge_dbg_reg_type_info *reg_info,
+ const char *cmd_buf)
{
-#define BD_DATA_NUM 6
+#define IDX_OFFSET 1
+ const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
+ struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
+ int entries_per_desc;
int bd_num, buf_len;
+ int index = 0;
+ int min_num;
int ret, i;
- int index;
- int max;
- ret = kstrtouint(cmd_buf, 10, &index);
- index = (ret != 0) ? 0 : index;
+ if (*s) {
+ ret = kstrtouint(s, 0, &index);
+ index = (ret != 0) ? 0 : index;
+ }
- bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
- if (bd_num <= 0)
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
+ if (bd_num <= 0) {
+ dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
+ reg_msg->offset, bd_num);
return;
+ }
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
@@ -88,22 +153,23 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ if (ret) {
kfree(desc_src);
return;
}
- max = (bd_num * BD_DATA_NUM) <= msg_num ?
- (bd_num * BD_DATA_NUM) : msg_num;
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
desc = desc_src;
- for (i = 0; i < max; i++) {
- ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
+ for (i = 0; i < min_num; i++) {
+ if (i > 0 && (i % entries_per_desc) == 0)
+ desc++;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % BD_DATA_NUM]);
+ desc->data[i % entries_per_desc]);
dfx_message++;
}
@@ -213,95 +279,25 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- int msg_num;
-
- if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
- msg_num = sizeof(hclge_dbg_bios_common_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
- &cmd_buf[21], msg_num,
- HCLGE_DBG_DFX_BIOS_OFFSET,
- HCLGE_OPC_DFX_BIOS_COMMON_REG);
- } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ssu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_0_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_0);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_1_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_1);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_2) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_2_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_2);
- } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
- msg_num = sizeof(hclge_dbg_igu_egu_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
- &cmd_buf[17], msg_num,
- HCLGE_DBG_DFX_IGU_OFFSET,
- HCLGE_OPC_DFX_IGU_EGU_REG);
- } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rpu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_0_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_0);
-
- msg_num = sizeof(hclge_dbg_rpu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_1_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_1);
- } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
- msg_num = sizeof(hclge_dbg_ncsi_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
- &cmd_buf[14], msg_num,
- HCLGE_DBG_DFX_NCSI_OFFSET,
- HCLGE_OPC_DFX_NCSI_REG);
- } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rtc_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RTC_OFFSET,
- HCLGE_OPC_DFX_RTC_REG);
- } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ppp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_PPP_OFFSET,
- HCLGE_OPC_DFX_PPP_REG);
- } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rcb_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RCB_OFFSET,
- HCLGE_OPC_DFX_RCB_REG);
- } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_tqp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_TQP_OFFSET,
- HCLGE_OPC_DFX_TQP_REG);
- } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
- hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
- } else {
+ struct hclge_dbg_reg_type_info *reg_info;
+ bool has_dump = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (!strncmp(cmd_buf, reg_info->reg_type,
+ strlen(reg_info->reg_type))) {
+ hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
+ has_dump = true;
+ }
+ }
+
+ if (strncmp(cmd_buf, "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
+ has_dump = true;
+ }
+
+ if (!has_dump) {
dev_info(&hdev->pdev->dev, "unknown command\n");
return;
}
@@ -325,11 +321,17 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
struct hclge_desc desc;
int i, ret;
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
return;
}
@@ -409,6 +411,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -425,7 +433,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
return;
err_tm_pg_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -537,7 +545,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
return;
err_tm_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -556,7 +564,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
int pri_id, ret;
u32 i;
- ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+ ret = kstrtouint(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -590,6 +598,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
queue_id, qset_id, pri_id, tc_id);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
for (group_id = 0; group_id < 32; group_id++) {
@@ -620,7 +634,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
return;
err_tm_map_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -634,7 +648,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+ dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
ret);
return;
}
@@ -658,7 +672,7 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "dump qos pri map fail, status is %d.\n", ret);
+ "dump qos pri map fail, ret = %d\n", ret);
return;
}
@@ -715,6 +729,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf);
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ dev_info(&hdev->pdev->dev, "\n");
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
@@ -723,7 +765,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
if (ret)
goto err_qos_cmd_send;
- dev_info(&hdev->pdev->dev, "\n");
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
@@ -733,7 +774,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
@@ -755,37 +797,15 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
-
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
-
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
-
return;
err_qos_cmd_send:
dev_err(&hdev->pdev->dev,
- "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+ "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
@@ -825,9 +845,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_add[0], req0->mac_add[1],
- req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
- req0->mac_add[5]);
+ req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ req0->mac_addr[2], req0->mac_addr[3],
+ req0->mac_addr[4], req0->mac_addr[5]);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
@@ -883,14 +903,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
+ /* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
@@ -940,7 +963,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get firmware statistics bd number failed, ret=%d\n",
+ "get firmware statistics bd number failed, ret = %d\n",
ret);
return;
}
@@ -961,7 +984,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
if (ret) {
kfree(desc_src);
dev_err(&hdev->pdev->dev,
- "get firmware statistics failed, ret=%d\n", ret);
+ "get firmware statistics failed, ret = %d\n", ret);
return;
}
@@ -981,6 +1004,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
kfree(desc_src);
}
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+ struct hclge_desc *desc, int *offset,
+ int *length)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int i;
+ int j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ *offset,
+ le32_to_cpu(desc[i].data[j]));
+ *offset += sizeof(u32);
+ *length -= sizeof(u32);
+ if (*length <= 0)
+ return;
+ }
+ }
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
@@ -990,17 +1040,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
-#define HCLGE_CMD_DATA_NUM 6
- struct hclge_desc desc[5];
- u32 byte_offset;
- int bd_num = 5;
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int offset;
int length;
int data0;
int ret;
- int i;
- int j;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
@@ -1026,22 +1072,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
if (ret)
return;
- byte_offset = offset;
- for (i = 0; i < bd_num; i++) {
- for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
- if (i == 0 && j == 0)
- continue;
-
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- byte_offset,
- le32_to_cpu(desc[i].data[j]));
- byte_offset += sizeof(u32);
- length -= sizeof(u32);
- if (length <= 0)
- return;
- }
- }
- offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ hclge_ncl_config_data_print(hdev, desc, &offset, &length);
}
}
@@ -1067,6 +1098,9 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
+#define DUMP_REG "dump reg"
+#define DUMP_TM_MAP "dump tm map"
+
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1074,8 +1108,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_fd_tcam(hdev);
} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
hclge_dbg_dump_tc(hdev);
- } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
- hclge_dbg_dump_tm_map(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
+ hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
hclge_dbg_dump_tm(hdev);
} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
@@ -1086,8 +1120,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_qos_buf_cfg(hdev);
} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
hclge_dbg_dump_mng_table(hdev);
- } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
- hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
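
The reworked debugfs parsing above is table-driven: hclge_dbg_dump_reg_cmd() walks hclge_dbg_reg_info[] and matches the command prefix with strncmp(), while hclge_dbg_run_cmd() skips past a matched prefix with sizeof("..."), which counts the terminating NUL and therefore also swallows the separating space. Below is a user-space model of the same dispatch, with two toy handlers standing in for the register-dump routines:

    #include <stdio.h>
    #include <string.h>

    struct reg_type_info {
        const char *reg_type;
        void (*dump)(const char *arg);
    };

    static void dump_ssu(const char *arg) { printf("ssu dump, arg=\"%s\"\n", arg); }
    static void dump_ppp(const char *arg) { printf("ppp dump, arg=\"%s\"\n", arg); }

    static const struct reg_type_info reg_info[] = {
        { "ssu", dump_ssu },
        { "ppp", dump_ppp },
    };

    static void dump_reg_cmd(const char *cmd_buf)
    {
        int has_dump = 0;

        for (size_t i = 0; i < sizeof(reg_info) / sizeof(reg_info[0]); i++) {
            size_t len = strlen(reg_info[i].reg_type);

            if (!strncmp(cmd_buf, reg_info[i].reg_type, len)) {
                /* argument (if any) starts after "<type> " */
                const char *arg = cmd_buf[len] ? cmd_buf + len + 1 : "";

                reg_info[i].dump(arg);
                has_dump = 1;
            }
        }

        if (!has_dump)
            printf("unknown command\n");
    }

    int main(void)
    {
        dump_reg_cmd("ssu 1");
        dump_reg_cmd("foo");
        return 0;
    }
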
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index d055fda41775..38b79321c4c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -4,6 +4,9 @@
#ifndef __HCLGE_DEBUGFS_H
#define __HCLGE_DEBUGFS_H
+#include <linux/etherdevice.h>
+#include "hclge_cmd.h"
+
#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
@@ -63,9 +66,23 @@ struct hclge_dbg_bitmap_cmd {
};
};
+struct hclge_dbg_reg_common_msg {
+ int msg_num;
+ int offset;
+ enum hclge_opcode_type cmd;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
- char message[60];
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+ const char *reg_type;
+ struct hclge_dbg_dfx_message *dfx_msg;
+ struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0a7243825e7b..58c6231aaa00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -637,8 +637,8 @@ static void hclge_log_error(struct device *dev, char *reg,
{
while (err->msg) {
if (err->int_msk & err_sts) {
- dev_warn(dev, "%s %s found [error status=0x%x]\n",
- reg, err->msg, err_sts);
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
set_bit(err->reset_level, reset_requests);
@@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg,
* @desc: descriptor for describing the command
* @cmd: command opcode
* @flag: flag for extended command structure
- * @w_num: offset for setting the read interrupt type.
- * @int_type: select which type of the interrupt for which the error
- * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
*
* This function query the error info from hw register/s using command
*/
static int hclge_cmd_query_error(struct hclge_dev *hdev,
- struct hclge_desc *desc, u32 cmd,
- u16 flag, u8 w_num,
- enum hclge_err_int_type int_type)
+ struct hclge_desc *desc, u32 cmd, u16 flag)
{
struct device *dev = &hdev->pdev->dev;
int desc_num = 1;
@@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
desc_num = 2;
}
- if (w_num)
- desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
@@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
- 0, 0, 0);
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
if (ret) {
dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
@@ -938,32 +930,44 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
- desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
- desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
- desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+ desc[1].data[3] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+ desc[1].data[4] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
}
- desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
- desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
- desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
- desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+ desc[1].data[3] |=
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
- desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
- desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
} else {
dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
return -EINVAL;
@@ -1171,8 +1175,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
- status);
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1208,8 +1212,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
- "rpu_rx_pkt_ecc_mbit_err");
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1321,10 +1325,12 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status)
+ if (status) {
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status,
&ae_dev->hw_err_reset_req);
+ hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR);
+ }
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1387,17 +1393,17 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
return ret;
}
- dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
- le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
- le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
- dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
- le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
- le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
@@ -1410,18 +1416,18 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
+ HCLGE_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
- dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
- le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
@@ -1434,7 +1440,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
/* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
- 0, 0, 0);
+ 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
return ret;
@@ -1450,9 +1456,9 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
le32_to_cpu(desc[0].data[0]);
while (err->msg) {
if (err->int_msk == err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg,
- le32_to_cpu(desc[0].data[0]));
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
break;
}
err++;
@@ -1460,13 +1466,13 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
}
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[1]));
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[2]));
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
return 0;
@@ -1483,8 +1489,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* read RAS error interrupt status */
ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
- 0, 0, 0);
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */
@@ -1495,10 +1500,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@ -1508,7 +1513,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
- dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
@@ -1566,8 +1571,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
@@ -1649,16 +1654,16 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev,
- "HNS Non-Fatal RAS error(status=0x%x) identified\n",
- status);
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
hclge_handle_all_ras_errors(hdev);
}
/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
@@ -1737,8 +1742,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
- vf_id, q_id);
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
@@ -1755,8 +1760,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+ hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
@@ -1802,8 +1807,8 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
- status);
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
@@ -1997,7 +2002,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
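
A recurring fix in the hclge_err.c hunks above is byte order: every value written into desc[].data[] now goes through cpu_to_le32() and values read back go through le32_to_cpu(), because command descriptors are little-endian in memory regardless of host endianness. A user-space sketch of the same idea using the glibc analogues htole32()/le32toh() from <endian.h>:

    #include <endian.h>
    #include <stdio.h>
    #include <stdint.h>

    struct desc {
        uint32_t data[6];    /* device expects little-endian words */
    };

    int main(void)
    {
        struct desc d = { { 0 } };
        uint32_t int_en_mask = 0x0fff0000;    /* example mask value */

        d.data[0] = htole32(int_en_mask);     /* cpu_to_le32() in the kernel */

        printf("readback = 0x%08x\n", le32toh(d.data[0]));    /* le32_to_cpu() */
        return 0;
    }
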
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 7ea8bb28a0cb..876fd81ad2f1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3fde5471e1c0..2b65f2799846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,25 @@
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -317,6 +336,80 @@ static const u8 hclge_hash_key[] = {
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -364,9 +457,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
u16 i, k, n;
int ret;
- desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
@@ -647,6 +744,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
count += 2;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ if (hdev->hw.mac.phydev) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
@@ -702,14 +805,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -1075,6 +1180,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1270,6 +1376,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
+ /* Set the init affinity based on pci func number */
+ i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+ i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+ cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+ &hdev->affinity_mask);
+
return ret;
}
@@ -2499,22 +2611,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->mbx_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->mbx_service_task);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->rst_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->rst_service_task);
}
-static void hclge_task_schedule(struct hclge_dev *hdev)
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- (void)schedule_work(&hdev->service_task);
+ !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
+ hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ system_wq, &hdev->service_task,
+ delay_time);
+ }
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2729,25 +2848,6 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(struct timer_list *t)
-{
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies + HZ);
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
- hclge_task_schedule(hdev);
-}
-
-static void hclge_service_complete(struct hclge_dev *hdev)
-{
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
-
- /* Flush memory before next watchdog */
- smp_mb__before_atomic();
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-}
-
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -2763,9 +2863,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
+ *
+ * check for vector0 reset event sources
*/
-
- /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -2882,10 +2982,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
break;
}
- /* clear the source of interrupt if it is not cause by reset */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable the interrupt if it is not caused by reset. When clearval
+ * equals 0, the interrupt status may have been cleared by hardware
+ * before the driver read the status register; in that case the
+ * vector0 interrupt should also be re-enabled.
+ */
if (!clearval ||
event_cause == HCLGE_VECTOR0_EVENT_MBX) {
- hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2918,6 +3023,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3105,6 +3240,71 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
+static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* for compatibility with old firmware, wait
+ * 100 ms for the VF to stop IO
+ */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return ret;
+ } else if (req->all_vf_ready) {
+ return 0;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
+ return -ETIME;
+}
+
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
+}
+
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -3229,7 +3429,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
if (!clearval)
return;
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ /* For revision 0x20, the reset interrupt source
+ * can only be cleared after the hardware reset is done
+ */
+ if (hdev->pdev->revision == 0x20)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -3250,19 +3456,33 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
return ret;
}
-static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
-#define HCLGE_RESET_SYNC_TIME 100
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3279,15 +3499,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
@@ -3298,14 +3522,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGE_RESET_SYNC_TIME);
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CMQ_ENABLE);
+ hclge_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
}
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3313,36 +3536,42 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
hdev->reset_pending);
return true;
- } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
- (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
- BIT(HCLGE_IMP_RESET_BIT))) {
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
dev_info(&hdev->pdev->dev,
- "reset failed because IMP Reset is pending\n");
+ "reset failed because new reset interrupt\n");
hclge_clear_reset_cause(hdev);
return false;
} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->reset_fail_cnt++;
- if (is_timeout) {
- set_bit(hdev->reset_type, &hdev->reset_pending);
- dev_info(&hdev->pdev->dev,
- "re-schedule to wait for hw reset done\n");
- return true;
- }
-
- dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
- hclge_clear_reset_cause(hdev);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
- mod_timer(&hdev->reset_timer,
- jiffies + HCLGE_RESET_INTERVAL);
-
- return false;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%d)\n",
+ hdev->reset_fail_cnt);
+ return true;
}
hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when the reset fails */
+ hclge_reset_handshake(hdev, true);
+
dev_err(&hdev->pdev->dev, "Reset fail!\n");
return false;
}
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
@@ -3353,10 +3582,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
+ case HNAE3_GLOBAL_RESET:
+ /* fall through */
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
default:
break;
}
+ /* clear the handshake status after re-initialization is done */
+ hclge_reset_handshake(hdev, false);
+
return ret;
}
@@ -3382,7 +3619,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- bool is_timeout = false;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3410,10 +3646,8 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
- if (hclge_reset_wait(hdev)) {
- is_timeout = true;
+ if (hclge_reset_wait(hdev))
goto err_reset;
- }
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3458,14 +3692,22 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
- del_timer(&hdev->reset_timer);
+
+ /* If default_reset_request holds a higher level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to be fixed.
+ */
+ hdev->reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (hdev->reset_level != HNAE3_NONE_RESET)
+ set_bit(hdev->reset_level, &hdev->reset_request);
return;
err_reset_lock:
rtnl_unlock();
err_reset:
- if (hclge_reset_err_handle(hdev, is_timeout))
+ if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
}
@@ -3493,9 +3735,10 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
- HCLGE_RESET_INTERVAL)))
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request)
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -3525,6 +3768,12 @@ static void hclge_reset_timer(struct timer_list *t)
{
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ /* if default_reset_request has no value, this reset request
+ * has already been handled, so just return here
+ */
+ if (!hdev->default_reset_request)
+ return;
+
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
@@ -3606,7 +3855,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task);
+ container_of(work, struct hclge_dev, service_task.work);
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
@@ -3621,7 +3872,8 @@ static void hclge_service_task(struct work_struct *work)
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
}
- hclge_service_complete(hdev);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4197,8 +4449,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- struct hclge_ctrl_vector_chain_cmd *req
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -5808,7 +6060,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOSPC;
}
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5959,6 +6211,89 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ false);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+ req->switch_param = switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret < 0)
+ return ret;
+ else if (ret == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ return -EBUSY;
+}
+
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret);
+}
+
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
@@ -6001,14 +6336,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 10
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-#define HCLGE_MAC_LINK_STATUS_DOWN 0
-#define HCLGE_MAC_LINK_STATUS_UP 1
-
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
- int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -6031,10 +6360,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6067,18 +6394,70 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- i = 0;
- do {
- /* serdes Internal loopback, independent of the network cable.*/
- msleep(HCLGE_MAC_LINK_STATUS_MS);
- ret = hclge_get_mac_link_status(hdev);
- if (ret == mac_link_ret)
- return 0;
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
- dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+ return ret;
+}
- return -EBUSY;
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev)
+ return -ENOTSUPP;
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
@@ -6110,6 +6489,20 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
int i, ret;
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+ * the same, the packets are looped back in the SSU. If SSU loopback
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
+ */
+ if (hdev->pdev->revision >= 0x21) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
switch (loop_mode) {
case HNAE3_LOOP_APP:
ret = hclge_set_app_loopback(hdev, en);
@@ -6118,6 +6511,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PARALLEL_SERDES:
ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -6160,10 +6556,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
+ /* Set the DOWN flag here to prevent the service task from
+ * being scheduled again
+ */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ cancel_delayed_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
}
@@ -6202,12 +6601,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
+ hclge_config_mac_tnl_int(hdev, false);
+
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -6249,7 +6651,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
enum hclge_mac_vlan_tbl_opcode op)
{
struct hclge_dev *hdev = vport->back;
- int return_status = -EIO;
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
@@ -6260,52 +6661,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
- return_status = 0;
+ return 0;
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
+ return -ENOSPC;
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOSPC;
}
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_REMOVE) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"remove mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "remove mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_LKUP) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"lookup mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "lookup mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
- } else {
- return_status = -EINVAL;
+
dev_err(&hdev->pdev->dev,
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
- op);
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
}
- return return_status;
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
@@ -7019,7 +7421,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%p.\n",
+ "Change uc mac err! invalid mac:%pM.\n",
new_addr);
return -EINVAL;
}
@@ -7124,7 +7526,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan, u8 qos,
+ bool is_kill, u16 vlan,
__be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
@@ -7235,7 +7637,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
- u16 vport_id, u16 vlan_id, u8 qos,
+ u16 vport_id, u16 vlan_id,
bool is_kill)
{
u16 vport_idx, vport_num = 0;
@@ -7245,7 +7647,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return 0;
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- 0, proto);
+ proto);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %d vport vlan filter config fail, ret =%d.\n",
@@ -7532,7 +7934,7 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0, false);
+ vlan->vlan_id, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
@@ -7558,7 +7960,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan_id, 0,
+ vlan_id,
true);
list_del(&vlan->node);
@@ -7578,7 +7980,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
true);
vlan->hd_tbl_status = false;
@@ -7611,7 +8013,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- u16 vlan_proto, qos;
+ u16 vlan_proto;
u16 state, vlan_id;
int i;
@@ -7620,12 +8022,11 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id, qos,
+ vport->vport_id, vlan_id,
false);
continue;
}
@@ -7635,7 +8036,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
false);
}
}
@@ -7675,12 +8076,12 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
- new_info->qos, false);
+ false);
}
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
- old_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7707,7 +8108,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(vlan_info->vlan_proto),
vport->vport_id,
vlan_info->vlan_tag,
- vlan_info->qos, false);
+ false);
if (ret)
return ret;
@@ -7716,7 +8117,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(old_vlan_info->vlan_proto),
vport->vport_id,
old_vlan_info->vlan_tag,
- old_vlan_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7829,7 +8230,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
- /* When port base vlan enabled, we use port base vlan as the vlan
+ /* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
* when user add new vlan or remove exist vlan, just update the vport
* vlan list. The vlan id in vlan list will be writen in vlan filter
@@ -7837,7 +8238,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
- vlan_id, 0, is_kill);
+ vlan_id, is_kill);
writen_to_tbl = true;
}
@@ -7848,7 +8249,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
* with stack
*/
@@ -7873,7 +8274,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
- 0, true);
+ true);
if (ret && ret != -EINVAL)
return;
@@ -8044,11 +8445,12 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8082,11 +8484,12 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8122,28 +8525,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
int ret;
- if (rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
- else if (rx_en && !tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
- else if (!rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
- else
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
- if (ret) {
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
- ret);
- return ret;
- }
-
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
- return 0;
+ return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8208,6 +8598,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
u32 rx_en, u32 tx_en)
{
@@ -8233,6 +8638,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
@@ -8481,7 +8888,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return ret;
+ return 0;
clear_nic:
hdev->nic_client = NULL;
@@ -8602,12 +9009,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
cancel_work_sync(&hdev->rst_service_task);
if (hdev->mbx_service_task.func)
@@ -8812,12 +9217,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
- INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ /* Setup affinity after service timer setup because add_timer_on
+ * is called in affinity notify.
+ */
+ hclge_misc_affinity_setup(hdev);
+
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -8842,7 +9251,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
return 0;
err_mdiobus_unreg:
@@ -8979,6 +9390,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
if (mac->phydev)
@@ -9238,106 +9650,314 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+#define BD_LIST_MAX_NUM 30
-static int hclge_get_regs_len(struct hnae3_handle *handle)
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
+ /* prepare 4 commands to query DFX BD number */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, 4);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+#define HCLGE_DFX_REG_BD_NUM 4
+
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
int ret;
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return -EOPNOTSUPP;
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
}
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
- regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ return ret;
}
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, j, reg_um, separator_num;
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, data_len, bd_num, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
int ret;
- *version = hdev->fw_version;
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
+ struct hclge_desc *desc_src;
+ u32 *reg = data;
+ int ret;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ return -ENOMEM;
}
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ kfree(desc_src);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
/* fetching per-PF registers valus from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGE_RING_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
- 4 * j);
+ HCLGE_RING_INT_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
- /* fetching PF common registers values from firmware */
ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
- reg += regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9452,7 +10072,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
- .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .get_mac_stats = hclge_get_mac_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
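The service-task rework in this file drops the 1 Hz service timer and the plain work_struct in favour of a single delayed work that re-arms itself; hclge_task_schedule() queues it on the first CPU of the misc vector's affinity mask via mod_delayed_work_on(). Below is a minimal, self-contained sketch of that self-rescheduling pattern; all names in it are made up for illustration and are not part of the driver.

/* Illustrative only: a delayed work that re-arms itself once per second. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_service_task;

static void demo_service_handler(struct work_struct *work)
{
	/* periodic housekeeping would go here */

	/* re-arm ourselves roughly once per second */
	schedule_delayed_work(&demo_service_task, round_jiffies_relative(HZ));
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_service_task, demo_service_handler);
	schedule_delayed_work(&demo_service_task, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancel and wait, mirroring cancel_delayed_work_sync() in the patch */
	cancel_delayed_work_sync(&demo_service_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");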
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6a12285f4c76..870550fa9ff1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -119,7 +119,7 @@
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
-#define HCLGE_TQP_RESET_TRY_TIMES 10
+#define HCLGE_TQP_RESET_TRY_TIMES 200
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0
@@ -148,6 +148,8 @@ enum HLCGE_PORT_TYPE {
NETWORK_PORT
};
+#define PF_VPORT_ID 0
+
#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
@@ -164,6 +166,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
+#define HCLGE_RESET_INT_M GENMASK(2, 0)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -178,6 +181,8 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
+#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -302,6 +307,13 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
@@ -532,50 +544,6 @@ struct key_info {
u8 key_length; /* use bit as unit */
};
-static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
-};
-
-static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
-};
-
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
@@ -806,9 +774,8 @@ struct hclge_dev {
u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
- struct timer_list service_timer;
struct timer_list reset_timer;
- struct work_struct service_task;
+ struct delayed_work service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -864,6 +831,10 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
+
+ /* affinity mask and notify for misc interrupt */
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* VPort level vlan tag configuration for TX direction */
@@ -990,7 +961,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
-int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
@@ -1018,4 +988,9 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 690b9990215c..f5da28a60d00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
- /* send response msg to VF after queue reset complete*/
+ /* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
@@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
HCLGE_RSS_MBX_RESP_LEN);
}
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+#define LINK_STATUS_OFFSET 1
+#define LINK_FAIL_CODE_OFFSET 2
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg[LINK_STATUS_OFFSET])
+ hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+}
+
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
return tail == hw->cmq.crq.next_to_use;
}
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF fail(%d) to media type for VF\n",
ret);
break;
+ case HCLGE_MBX_PUSH_LINK_STATUS:
+ hclge_handle_link_change_event(hdev, req);
+ break;
+ case HCLGE_MBX_NCSI_ERROR:
+ hclge_handle_ncsi_error(hdev);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index abb1b438564e..dc4dfd4602ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
+ phy_attached_info(phydev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 3f41fa2bc414..e829101d576c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
- u32 shapping_para = 0;
u8 ir_u, ir_b, ir_s;
+ u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
@@ -650,12 +650,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
-static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
- if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
- (hdev->tm_info.num_pg != 1))
- return -EINVAL;
-
hclge_tm_pg_info_init(hdev);
hclge_tm_tc_info_init(hdev);
@@ -663,8 +659,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
hclge_pfc_info_init(hdev);
-
- return 0;
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1428,15 +1422,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
- int ret;
-
/* fc_mode is HCLGE_FC_FULL on reset */
hdev->tm_info.fc_mode = HCLGE_FC_FULL;
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
- ret = hclge_tm_schd_info_init(hdev);
- if (ret)
- return ret;
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 652b796044e3..4c2c9458648f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean = 0;
+ int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
@@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+ reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
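The dev_info() above prints the firmware version as four dotted fields pulled out of one 32-bit word with hnae3_get_field(). A standalone sketch of the same byte-wise decoding follows, assuming one byte per field as the BYTE0..BYTE3 mask names suggest; the packed value used here is invented.

/* Illustrative only: decode a packed 32-bit firmware version. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t version = 0x01080b02;	/* hypothetical packed version */

	printf("firmware version %u.%u.%u.%u\n",
	       (unsigned int)((version >> 24) & 0xff),	/* byte3: most significant field */
	       (unsigned int)((version >> 16) & 0xff),	/* byte2 */
	       (unsigned int)((version >> 8) & 0xff),	/* byte1 */
	       (unsigned int)(version & 0xff));		/* byte0: least significant field */
	return 0;
}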
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 127a434a56f3..f830eef02e5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGEVF_NIC_CMQ_EN_B 16
-#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a13a0e101c3b..594cae8c7410 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
* with stack.
*/
@@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- /* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
- dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
-
if (hdev->reset_type == HNAE3_FLR_RESET)
return hclgevf_flr_poll_timeout(hdev,
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_CNT);
-
- ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
- !(val & HCLGEVF_RST_ING_BITS),
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
if (ret) {
@@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
return 0;
}
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
+
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
int ret;
@@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ return 0;
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
+ hclgevf_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
@@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
+ /* recover handshake status with IMP when reset fail */
+ hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
hdev->rst_stats.rst_fail_cnt);
@@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- } else {
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
}
}
@@ -1539,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
rtnl_lock();
- /* now, re-initialize the nic client and ae device*/
+ /* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
@@ -1762,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
- */
-
- /* if we are never geting into pending state it means either:
+ *
+ * if we are never getting into pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
@@ -1867,29 +1888,45 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
u32 *clearval)
{
- u32 cmdq_src_reg, rst_ing_reg;
+ u32 val, cmdq_stat_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
- cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_STAT_REG);
- if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
- *clearval = cmdq_src_reg;
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
+ /* set up VF hardware reset status; the PF will clear
+ * this status once its initialization is done.
+ */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
return HCLGEVF_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
- if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
- *clearval = cmdq_src_reg;
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+ /* for revision 0x21, clearing an interrupt means writing 0
+ * to its bit in the clear register, while writing 1 keeps
+ * the old value.
+ * for revision 0x20, the clear register is a read & write
+ * register, so we should write 0 only to the bit we are
+ * handling, and keep the other bits as in cmdq_stat_reg.
+ */
+ if (hdev->pdev->revision >= 0x21)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
return HCLGEVF_VECTOR0_EVENT_MBX;
}
@@ -2265,7 +2302,7 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
- int ret = 0;
+ int ret;
hclgevf_get_misc_vector(hdev);
@@ -2695,7 +2732,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 5a9e30998a8f..bdde3afc286b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -87,6 +87,8 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -103,6 +105,9 @@
(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
@@ -120,7 +125,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 6a96987bd8f0..a108191c9e50 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -277,9 +277,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
switch (msg_q[0]) {
case HCLGE_MBX_LINK_STAT_CHANGE:
- link_status = le16_to_cpu(msg_q[1]);
+ link_status = msg_q[1];
memcpy(&speed, &msg_q[2], sizeof(speed));
- duplex = (u8)le16_to_cpu(msg_q[4]);
+ duplex = (u8)msg_q[4];
/* update upper layer with new link status */
hclgevf_update_link_status(hdev, link_status);
@@ -287,7 +287,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_MODE:
- idx = (u8)le16_to_cpu(msg_q[1]);
+ idx = (u8)msg_q[1];
if (idx)
memcpy(&hdev->hw.mac.supported, &msg_q[2],
sizeof(unsigned long));
@@ -301,14 +301,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- reset_type = le16_to_cpu(msg_q[1]);
+ reset_type = (enum hnae3_reset_type)msg_q[1];
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
break;
case HCLGE_MBX_PUSH_VLAN_INFO:
- state = le16_to_cpu(msg_q[1]);
+ state = msg_q[1];
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9c78251f9c39..0e13d1c7e474 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma_addr;
int i, j;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index cca71ba7a74a..13e30eba5349 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
ehea_destroy_eq(pr->eq);
for (i = 0; i < pr->rq1_skba.len; i++)
- if (pr->rq1_skba.arr[i])
- dev_kfree_skb(pr->rq1_skba.arr[i]);
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
for (i = 0; i < pr->rq2_skba.len; i++)
- if (pr->rq2_skba.arr[i])
- dev_kfree_skb(pr->rq2_skba.arr[i]);
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
for (i = 0; i < pr->rq3_skba.len; i++)
- if (pr->rq3_skba.arr[i])
- dev_kfree_skb(pr->rq3_skba.arr[i]);
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
for (i = 0; i < pr->sq_skba.len; i++)
- if (pr->sq_skba.arr[i])
- dev_kfree_skb(pr->sq_skba.arr[i]);
+ dev_kfree_skb(pr->sq_skba.arr[i]);
vfree(pr->rq1_skba.arr);
vfree(pr->rq2_skba.arr);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 395dde444483..9e43c9ace9c2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl);
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index fa4bb940665c..4f83f97ffe8b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memcpy(dst + cur,
page_address(skb_frag_page(frag)) +
- frag->page_offset, skb_frag_size(frag));
+ skb_frag_off(frag), skb_frag_size(frag));
cur += skb_frag_size(frag);
}
} else {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..71d3d8854d8f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
DMA_TO_DEVICE);
- if (txdr->buffer_info[i].skb)
- dev_kfree_skb(txdr->buffer_info[i].skb);
+ dev_kfree_skb(txdr->buffer_info[i].skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f703fa58458e..86493fea56e4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too
*/
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
+ dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 08342698386d..de8c5818a305 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
buffer_info->dma,
buffer_info->length,
DMA_TO_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
dma_unmap_single(&pdev->dev,
buffer_info->dma,
2048, DMA_FROM_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 395b05701480..a1fab77b2096 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
else
phy_reg |= 0xFA;
e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
}
hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755fa17..1502895eb45d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e4baa13b3cda..8a3f035c3a5f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -6297,7 +6296,7 @@ fl_out:
static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
@@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int e1000e_pm_thaw(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000e_set_interrupt_capability(adapter);
@@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev)
static int e1000e_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
u16 eee_lp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 7d42582ed48d..b14441944b4b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
@@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
#define MIN_Q_VECTORS 1
enum fm10k_non_q_vectors {
FM10K_MBX_VECTOR,
-#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
- NON_Q_VECTORS_PF
+ NON_Q_VECTORS
};
-#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
- NON_Q_VECTORS_PF : \
- NON_Q_VECTORS_VF)
-#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
+#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS)
struct fm10k_q_vector {
struct fm10k_intfc *interface;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 20768ac7f17e..c45315472245 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
@@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
u8 num_tc = 0;
- int i, err;
+ int i;
/* verify type and determine num_tcs needed */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
/* update TC hardware mapping if necessary */
if (num_tc != netdev_get_num_tc(dev)) {
- err = fm10k_setup_tc(dev, num_tc);
+ int err = fm10k_setup_tc(dev, num_tc);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index dca104121c05..1d27b2fb23af 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
- if (!q_vector->dbg_q_vector)
- return;
/* Generate a file for each rx ring in the q_vector */
for (i = 0; i < q_vector->tx.count; i++) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4895dd83dd08..c681d2d28107 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/vmalloc.h>
@@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
const unsigned int size)
{
unsigned int i;
- char *p;
if (!pointer) {
/* memory is not zero allocated so we have to clear it */
@@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
}
for (i = 0; i < size; i++) {
- p = (char *)pointer + stats[i].stat_offset;
+ char *p = (char *)pointer + stats[i].stat_offset;
switch (stats[i].sizeof_stat) {
case sizeof(u64):
@@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_q_vector *qv;
u16 tx_itr, rx_itr;
int i;
@@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev,
/* update q_vectors */
for (i = 0; i < interface->num_q_vectors; i++) {
- qv = interface->q_vector[i];
+ struct fm10k_q_vector *qv = interface->q_vector[i];
+
qv->tx.itr = tx_itr;
qv->rx.itr = rx_itr;
}
@@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
/* report maximum channels */
ch->max_combined = fm10k_max_channels(dev);
/* report info for other vector */
- ch->max_other = NON_Q_VECTORS(hw);
+ ch->max_other = NON_Q_VECTORS;
ch->other_count = ch->max_other;
/* record RSS queues */
@@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev,
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int count = ch->combined_count;
- struct fm10k_hw *hw = &interface->hw;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != NON_Q_VECTORS(hw))
+ if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 8de77155f2e7..afe1fafd2447 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
@@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
size_t size;
- int i, err;
+ int i;
/* return error if iov_data is already populated */
if (iov_data)
@@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
/* loop through vf_info structures initializing each entry */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
+ int err;
/* Record VF VSI value */
vf_info->vsi = i + 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 90270b4a1682..e0a2be534b20 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright(c) 2013 - 2018 Intel Corporation.";
+ "Copyright(c) 2013 - 2019 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
/* prefetch first cache line of first page */
prefetch(page_addr);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif
/* allocate a skb to store the frags */
@@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct fm10k_tx_buffer *tx_buffer;
struct fm10k_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned char *data;
dma_addr_t dma;
unsigned int data_len, size;
@@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
@@ -1823,7 +1824,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
v_budget = min_t(u16, v_budget, num_online_cpus());
/* account for vectors not related to queues */
- v_budget += NON_Q_VECTORS(hw);
+ v_budget += NON_Q_VECTORS;
/* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
@@ -1855,7 +1856,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
}
/* record the number of queues available for q_vectors */
- interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
+ interface->num_q_vectors = v_budget - NON_Q_VECTORS;
return 0;
}
@@ -1869,7 +1870,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
struct net_device *dev = interface->netdev;
- int pc, offset, rss_i, i, q_idx;
+ int pc, offset, rss_i, i;
u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
u8 num_pcs = netdev_get_num_tc(dev);
@@ -1879,7 +1880,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
rss_i = interface->ring_feature[RING_F_RSS].indices;
for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
- q_idx = pc;
+ int q_idx = pc;
+
for (i = 0; i < rss_i; i++) {
interface->tx_ring[offset + i]->reg_idx = q_idx;
interface->tx_ring[offset + i]->qos_pc = pc;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 21021fe4f1c3..75e51f91036c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_common.h"
@@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
{
struct fm10k_mbx_fifo *fifo = &mbx->rx;
u16 total_len = 0, msg_len;
- u32 *msg;
/* length should include previous amounts pushed */
len += mbx->pushed;
/* offset in message is based off of current message size */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
msg_len = FM10K_TLV_DWORD_LEN(*msg);
total_len += msg_len;
@@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* reduce length by 1 to convert to a mask */
u16 mbmem_len = mbx->mbmem_len - 1;
u16 tail_len, len = 0;
- u32 *msg;
/* push head behind tail */
if (mbx->tail < head)
@@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* determine msg aligned offset for end of buffer */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
tail_len = len;
len += FM10K_TLV_DWORD_LEN(*msg);
@@ -2132,7 +2134,8 @@ fifo_err:
* DWORDs, not bytes. Any invalid values will cause the mailbox to return
* error.
**/
-s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw,
+ struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *msg_data)
{
mbx->mbx_reg = FM10K_GMBX;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 538a8467f434..09f7a246e134 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
@@ -54,7 +54,7 @@ err:
**/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_tx_queues; i++) {
err = fm10k_setup_tx_resources(interface->tx_ring[i]);
@@ -121,7 +121,7 @@ err:
**/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_rx_queues; i++) {
err = fm10k_setup_rx_resources(interface->rx_ring[i]);
@@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
**/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
- struct fm10k_tx_buffer *tx_buffer;
unsigned long size;
u16 i;
@@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_buffer[i];
+ struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];
+
fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
}
@@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
if (!rx_ring->rx_buffer)
return;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
- struct fm10k_ring *ring;
int i;
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = interface->rx_ring[i];
+ struct fm10k_ring *ring = interface->rx_ring[i];
+
rcu_assign_pointer(ring->l2_accel, l2_accel);
}
@@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_l2_accel *old_l2_accel = NULL;
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
- int size = 0, i;
+ int size, i;
u16 vid, glort;
/* The hardware supported by fm10k only filters on the destination MAC
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e49fb51d3613..bb236fa44048 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
struct net_device *netdev = interface->netdev;
u32 __iomem *hw_addr;
u32 value;
- int err;
/* do nothing if netdev is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
@@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
if (~value) {
+ int err;
+
/* Make sure the reset was initiated because we detached,
* otherwise we might race with a different reset flow.
*/
@@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
*/
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
- int i;
-
/* If we're down or resetting, just bail */
if (test_bit(__FM10K_DOWN, interface->state) ||
test_bit(__FM10K_RESETTING, interface->state))
@@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
interface->next_tx_hang_check = jiffies + (2 * HZ);
if (netif_carrier_ok(interface->netdev)) {
+ int i;
+
/* Force detection of hung controller */
for (i = 0; i < interface->num_tx_queues; i++)
set_check_for_tx_hang(interface->tx_ring[i]);
@@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ txint = ring->q_vector->v_idx + NON_Q_VECTORS;
txint |= FM10K_INT_MAP_TIMER0;
}
@@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
rxint |= FM10K_INT_MAP_TIMER1;
}
@@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
int vector = interface->num_q_vectors;
- struct fm10k_hw *hw = &interface->hw;
struct msix_entry *entry;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
+ entry = &interface->msix_entries[NON_Q_VECTORS + vector];
while (vector) {
struct fm10k_q_vector *q_vector;
@@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
unsigned int ri = 0, ti = 0;
int vector, err;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
+ entry = &interface->msix_entries[NON_Q_VECTORS];
for (vector = 0; vector < interface->num_q_vectors; vector++) {
struct fm10k_q_vector *q_vector = interface->q_vector[vector];
@@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* Restart the MAC/VLAN request queue in-case of outstanding events */
fm10k_macvlan_schedule(interface);
- return err;
+ return 0;
}
/**
@@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
**/
static int __maybe_unused fm10k_resume(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
@@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
**/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index cb4d02629b86..be07bfdb0bb4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
-s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
@@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u32 *result;
s32 err = 0;
u32 msg[2];
u8 mode = 0;
@@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
return FM10K_ERR_PARAM;
if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
- result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+ u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
/* XCAST mode update requested */
err = fm10k_tlv_attr_get_u8(result, &mode);
@@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
/* read remaining fields */
fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
fault->address <<= 32;
- fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
/* clear valid bit to allow for next error */
@@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
* switch API.
**/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, mask;
u32 dglort_map;
@@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
* This handler configures the default VLAN for the PF
**/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, pvid;
u32 pvid_update;
@@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
* messages that the PF has sent.
**/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_swapi_error err_msg;
s32 err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 2a7a40bf2b1c..21eff0895a7a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_tlv.h"
@@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 i, attr_id, offset = 0;
- s32 err = 0;
+ s32 err;
u16 len;
/* verify pointers are not NULL */
@@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* a minimum it just indicates that the message requested was
* unimplemented.
**/
-s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
+ u32 __always_unused **results,
+ struct fm10k_mbx_info __always_unused *mbx)
{
return FM10K_NOT_IMPLEMENTED;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 9fb9fca375e3..15ac1c7885bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index a8519c1f0406..dc8ccd378ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_vf.h"
@@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
* This function should determine the MAC address for the VF
**/
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u8 perm_addr[ETH_ALEN];
u16 vid;
@@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
* This function is used to add or remove unicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
- const u8 *mac, u16 vid, bool add, u8 flags)
+static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ const u8 *mac, u16 vid, bool add,
+ u8 __always_unused flags)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
@@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
* This function is used to add or remove multicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
const u8 *mac, u16 vid, bool add)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
@@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
* are ready to bring up the interface.
**/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
@@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
* enabled we can add filters, if it is disabled all filters for this
* logical port are flushed.
**/
-static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
- u16 count, bool enable)
+static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ u16 __always_unused count, bool enable)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[2];
@@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
* so that it can enable either multicast, multicast promiscuous, or
* promiscuous mode of operation.
**/
-static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort, u8 mode)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[3];
@@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
* that information to then populate a DGLORTMAP/DEC entry and the queues
* to which it has been assigned.
**/
-static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw,
struct fm10k_dglort_cfg *dglort)
{
/* verify the dglort pointer */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 84bd06901014..3e535d3263b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1021,6 +1021,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 814acbe79ffd..72c04881d290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8))
+ hw->aq.api_min_ver >= 8)) {
hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 6536023fa074..21cccec328e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0008
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
+#define I40E_FW_API_VERSION_MINOR_X722 0x0009
+#define I40E_FW_API_VERSION_MINOR_X710 0x0009
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -2051,20 +2051,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 906cf68d3453..46e649c09f72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -13,7 +13,7 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
@@ -1577,19 +1577,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
- if (status)
- break;
-
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
status = I40E_ERR_UNKNOWN_PHY;
break;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ case I40E_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
}
- } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
- (total_delay < max_delay));
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
if (status)
return status;
@@ -1643,25 +1646,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-/**
- * i40e_set_fc
- * @hw: pointer to the hw struct
- * @aq_failures: buffer to return AdminQ failure information
- * @atomic_restart: whether to enable atomic link restart
- *
- * Set the requested flow control mode using set_phy_config.
- **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+static noinline_for_stack enum i40e_status_code
+i40e_set_fc_status(struct i40e_hw *hw,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ bool atomic_restart)
{
- enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
- struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
- enum i40e_status_code status;
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
u8 pause_mask = 0x0;
- *aq_failures = 0x0;
-
switch (fc_mode) {
case I40E_FC_FULL:
pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
@@ -1677,6 +1670,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
break;
}
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities == abilities->abilities)
+ return 0;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ return i40e_aq_set_phy_config(hw, &config, NULL);
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ *aq_failures = 0x0;
+
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
@@ -1685,31 +1720,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
- /* clear the old pause settings */
- config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
- ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
- /* set the new abilities */
- config.abilities |= pause_mask;
- /* If the abilities have changed, then set the new config */
- if (config.abilities != abilities.abilities) {
- /* Auto restart link so settings take effect */
- if (atomic_restart)
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- /* Copy over all the old settings */
- config.phy_type = abilities.phy_type;
- config.phy_type_ext = abilities.phy_type_ext;
- config.link_speed = abilities.link_speed;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_PHY_FEC_CONFIG_MASK;
- status = i40e_aq_set_phy_config(hw, &config, NULL);
+ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- if (status)
- *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- }
/* Update the link info */
status = i40e_update_link_info(hw);
if (status) {
@@ -2537,7 +2551,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
i40e_status status = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 292eeb3def10..200a1cb3b536 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset, 1,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
if (ret)
return I40E_ERR_NOT_READY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index ddb48ae7cce4..2a80c5daa376 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -30,6 +30,8 @@
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 55d20acfcf70..41232898d8ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1732,29 +1732,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile;
const char *name = pci_name(pf->pdev);
- const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (!pf->i40e_dbg_pf)
- return;
-
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_command_fops);
- if (!pfile)
- goto create_failed;
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_netdev_ops_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
- return;
-
-create_failed:
- dev_info(dev, "debugfs dir/file for %s failed\n", name);
- debugfs_remove_recursive(pf->i40e_dbg_pf);
+ debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 527eb52c5401..41e1240acaea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
}
/**
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: mask request FEC info
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ struct ethtool_link_ksettings *ks)
+{
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+ if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ks: ethtool ksettings to fill in
@@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ struct i40e_veb *veb = NULL;
unsigned int i;
bool veb_stats;
u64 *p = data;
@@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
goto check_data_pointer;
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->lan_veb < I40E_MAX_VEB) &&
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ if (veb_stats) {
+ veb = pf->veb[pf->lan_veb];
+ i40e_update_veb_stats(veb);
+ }
+
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
* intelligently
@@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+check_data_pointer:
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
@@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Module is not SFF-8472 compliant */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
+ /* Module is SFF-8472 compliant but doesn't implement
+ * Digital Diagnostic Monitoring (DDM).
+ */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9ebbe3da61bb..fdf43d87e983 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -534,6 +534,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
sizeof(pf->veb[i]->stats));
memset(&pf->veb[i]->stats_offsets, 0,
sizeof(pf->veb[i]->stats_offsets));
+ memset(&pf->veb[i]->tc_stats, 0,
+ sizeof(pf->veb[i]->tc_stats));
+ memset(&pf->veb[i]->tc_stats_offsets, 0,
+ sizeof(pf->veb[i]->tc_stats_offsets));
pf->veb[i]->stat_offsets_loaded = false;
}
}
@@ -677,7 +681,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
* i40e_update_veb_stats - Update Switch component statistics
* @veb: the VEB being updated
**/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
struct i40e_hw *hw = &pf->hw;
@@ -2530,6 +2534,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+ vsi->netdev->name,
+ cur_multipromisc ? "entering" : "leaving");
}
}
@@ -3360,7 +3368,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
- if (!i40e_enabled_xdp_vsi(vsi))
+ if (err || !i40e_enabled_xdp_vsi(vsi))
return err;
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
@@ -6412,50 +6420,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
- * i40e_update_dcb_config
- * @hw: pointer to the HW struct
- * @enable_mib_change: enable MIB change event
- *
- * Update DCB configuration from the firmware
- **/
-static enum i40e_status_code
-i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
-{
- struct i40e_lldp_variables lldp_cfg;
- i40e_status ret;
-
- if (!hw->func_caps.dcb)
- return I40E_NOT_SUPPORTED;
-
- /* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return I40E_ERR_NOT_READY;
-
- /* Get DCBX status */
- ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
- if (ret)
- return ret;
-
- /* Check the DCBX Status */
- if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
- hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
- /* Get current DCBX configuration */
- ret = i40e_get_dcb_config(hw);
- if (ret)
- return ret;
- } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- return I40E_ERR_NOT_READY;
- }
-
- /* Configure the LLDP MIB change event */
- if (enable_mib_change)
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
-
- return ret;
-}
-
-/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6477,7 +6441,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
}
- err = i40e_update_dcb_config(hw, true);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -8486,6 +8450,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -14569,9 +14538,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
-
- if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
+ bool is_recovery_mode = false;
+
+ if (pf->hw.mac.type == I40E_MAC_XL710)
+ is_recovery_mode =
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
+ if (pf->hw.mac.type == I40E_MAC_X722)
+ is_recovery_mode =
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
+ if (is_recovery_mode) {
dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
@@ -14585,6 +14565,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
}
/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is used when a NIC may be about to enter recovery mode.
+ * When a NIC's internal data structures are corrupted, its firmware
+ * eventually enters recovery mode. Right after a POR it takes about
+ * 7 minutes for the firmware to do so; until then the NIC sits in an
+ * intermediate state. After that period the NIC almost surely enters
+ * recovery mode. The only way for the driver to detect the intermediate
+ * state is to issue a series of PF resets and check the return value.
+ * A successful PF reset may still mean the firmware is in recovery
+ * mode, so the caller must check for recovery mode whenever this
+ * function returns success. There is a small chance that the firmware
+ * hangs in the intermediate state forever.
+ * Since waiting the full 7 minutes is impractical, this function waits
+ * only about 10 seconds before giving up and returning an error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+{
+ const unsigned short MAX_CNT = 1000;
+ const unsigned short MSECS = 10;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ int cnt;
+
+ for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ if (!ret)
+ break;
+ msleep(MSECS);
+ }
+
+ if (cnt == MAX_CNT) {
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+ return ret;
+ }
+
+ pf->pfr_count++;
+ return ret;
+}
+
+/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
* @hw: ptr to the hardware info
@@ -14812,14 +14837,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- if (!i40e_check_recovery_mode(pf)) {
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
- }
- pf->pfr_count++;
+
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_pf_loop_reset(pf);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
}
+
+ i40e_check_recovery_mode(pf);
+
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -15605,8 +15638,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
**/
static int __maybe_unused i40e_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
@@ -15656,8 +15688,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
**/
static int __maybe_unused i40e_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
/* If we're not suspended, then there is nothing to do */
@@ -15674,7 +15705,7 @@ static int __maybe_unused i40e_resume(struct device *dev)
*/
err = i40e_restore_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
err);
}
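Note: the probe path above now funnels the initial PF reset through i40e_pf_loop_reset(), which retries with a short delay instead of failing on the first attempt. A minimal userspace sketch of the same bounded-retry pattern; try_reset(), MAX_TRIES and WAIT_US are illustrative names, not driver symbols.

#include <stdio.h>
#include <unistd.h>

/* Stand-in for i40e_pf_reset(): pretend it succeeds on the third attempt. */
static int try_reset(void)
{
	static int remaining = 3;
	return --remaining > 0 ? -1 : 0;
}

int main(void)
{
	const int MAX_TRIES = 1000;        /* cap the total wait at ~10 seconds */
	const useconds_t WAIT_US = 10000;  /* 10 ms between attempts */
	int ret = -1;
	int i;

	for (i = 0; i < MAX_TRIES; i++) {
		ret = try_reset();
		if (!ret)
			break;
		usleep(WAIT_US);
	}

	if (ret)
		printf("reset never succeeded, give up\n");
	else
		printf("reset ok after %d attempt(s)\n", i + 1);
	return ret ? 1 : 0;
}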
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index c508b75c3c09..e4d8d20baf3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
+ * @hw: pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @offset: offset in words from module start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr)
+{
+ i40e_status status;
+ u16 ptr_value = 0;
+ u32 flat_offset;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL)
+ return I40E_ERR_BAD_PTR;
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ ptr_value &= ~I40E_PTR_TYPE;
+
+ /* PtrValue in 4kB units, need to convert to words */
+ ptr_value /= 2;
+ flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!status) {
+ status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
+ 2 * words_data_size,
+ data_ptr, true, NULL);
+ i40e_release_nvm(hw);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm aq failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ } else {
+ return I40E_ERR_NVM;
+ }
+ } else {
+ /* Read from the Shadow RAM */
+ status = i40e_read_nvm_buffer(hw, ptr_value + offset,
+ &words_data_size, data_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
+
+/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
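Note: the offset arithmetic in the new i40e_read_nvm_module_data() is compact, so here is the same conversion spelled out in a standalone sketch. A pointer stored in 4 KB units becomes a flat word offset, and the admin-queue read then wants byte quantities; the variable names below are illustrative, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ptr_4kb_units = 0x0010; /* example module pointer, 4 KB units */
	uint16_t word_offset   = 0x0004; /* offset into the module, in words */
	uint16_t words_to_read = 8;

	/* one 4 KB block = 4096 bytes = 2048 16-bit words */
	uint32_t flat_word_off = (uint32_t)(ptr_4kb_units / 2) * 0x1000 + word_offset;
	uint32_t byte_off      = 2 * flat_word_off; /* AQ read takes bytes */
	uint32_t byte_len      = 2 * words_to_read;

	printf("flat word offset: 0x%x\n", flat_word_off);
	printf("AQ byte offset:   0x%x, length %u bytes\n", byte_off, byte_len);
	return 0;
}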
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index eac88bcc6c06..5250441bf75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
@@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 52e3680c57f8..d35d690ca10f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -58,7 +58,7 @@
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -81,7 +81,7 @@
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -108,7 +108,7 @@
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -136,7 +136,7 @@
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -259,7 +259,7 @@
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -363,6 +363,12 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -503,7 +509,7 @@
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1242,14 +1248,14 @@
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1658,7 +1664,7 @@
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3025,7 +3031,7 @@
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3161,7 +3167,7 @@
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3184,7 +3190,7 @@
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
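Note: the register-mask hunks above all touch bit 31. Assuming I40E_MASK expands to a plain left shift of its first argument, the change from 0x1 to 0x1u matters because shifting a signed 1 into the sign bit overflows int, and on common compilers widening the negative result to a 64-bit flags word sign-extends it. A small illustrative sketch of the difference (not the driver macro):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t signed_mask   = (uint64_t)(1  << 31); /* overflows int, sign-extends */
	uint64_t unsigned_mask = (uint64_t)(1u << 31); /* well-defined: bit 31 only  */

	printf("signed:   0x%016llx\n", (unsigned long long)signed_mask);
	printf("unsigned: 0x%016llx\n", (unsigned long long)unsigned_mask);
	return 0;
}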
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2a2fe3ec7926..e3f29dc8b290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 100e92d2982f..36d37f31a287 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8f43aa47c263..b43ec94a0f29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -443,6 +443,7 @@ struct i40e_nvm_access {
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40
#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
#define I40E_MODULE_TYPE_QSFP28 0x11
#define I40E_MODULE_QSFP_MAX_LEN 640
@@ -623,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
/* Used in set switch config AQ command */
@@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02b09a8ad54c..f8aa4deceb5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- if (vf->link_forced) {
+
+ /* Always report link is down if the VF queues aren't enabled */
+ if (!vf->queues_enabled) {
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ } else if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
+
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
alluni = true;
aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
alluni);
- if (!aq_ret) {
- if (allmulti) {
+ if (aq_ret)
+ goto err_out;
+
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
- if (alluni) {
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- }
- }
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
@@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* VF does not know about these additional VSIs and all
* it cares is about its own queues. PF configures these queues
* to its appropriate VSIs based on TC mapping
- **/
+ */
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
@@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
+ vf->queues_enabled = true;
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
+ /* Immediately mark queues as disabled */
+ vf->queues_enabled = false;
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
+ * If the VF is indeed in reset, the VSI pointer has
+ * to be refreshed to the newly loaded VSI under pf->vsi[id].
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (i > 0)
+ vsi = pf->vsi[vf->lan_vsi_idx];
break;
+ }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f65cc0c16550..7164b9bb294f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,6 +99,7 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
+ bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_mac;
u16 num_vlan;
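Note: the promiscuous-mode hunk above switches to test_and_set_bit()/test_and_clear_bit() so that the "successfully set/unset" message is only printed when the VF state actually changes. A minimal userspace sketch of the same log-on-transition idea using a C11 atomic flag; set_promisc and the flag name are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool mc_promisc;

/* Log only when the requested state differs from the current one. */
static void set_promisc(bool enable)
{
	bool was = atomic_exchange(&mc_promisc, enable);

	if (enable && !was)
		printf("entered multicast promiscuous mode\n");
	else if (!enable && was)
		printf("left multicast promiscuous mode\n");
	/* otherwise: no state change, stay quiet */
}

int main(void)
{
	set_promisc(true);   /* prints: entered ...  */
	set_promisc(true);   /* silent: already set  */
	set_promisc(false);  /* prints: left ...     */
	set_promisc(false);  /* silent: already clear */
	return 0;
}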
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 0cca1b589b56..7a30d5d5ef53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
**/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > IAVF_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct iavf_tx_buffer *tx_bi;
struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 71e7d090f8db..dd3348f9da9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
**/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9ee6b55553c0..fb2bc836b20a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -69,11 +69,10 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
-#define ICE_MBXQ_LEN 64
+#define ICE_MBXSQ_LEN 64
+#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_TXQS 2048
-#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
@@ -86,16 +85,6 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
@@ -220,6 +209,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_STATE_NBITS /* must be last */
};
@@ -292,8 +282,8 @@ struct ice_vsi {
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */
- u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */
+ u16 *txq_map; /* index in pf->avail_txqs */
+ u16 *rxq_map; /* index in pf->avail_rxqs */
u16 alloc_txq; /* Allocated Tx queues */
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
@@ -329,7 +319,6 @@ struct ice_q_vector {
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
- ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
@@ -337,7 +326,8 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_ENABLE_FW_LLDP,
+ ICE_FLAG_NO_MEDIA,
+ ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -363,9 +353,9 @@ struct ice_pf {
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
- DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
- DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
+ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -376,6 +366,8 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 max_pf_txqs; /* Total Tx queues PF wide */
+ u16 max_pf_rxqs; /* Total Rx queues PF wide */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
@@ -455,6 +447,8 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_update_vsi_stats(struct ice_vsi *vsi);
+void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
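Note: ice.h above drops the fixed ICE_MAX_TXQS/ICE_MAX_RXQS arrays in favour of bitmaps sized at runtime from max_pf_txqs/max_pf_rxqs. A rough userspace sketch of that allocate-then-search pattern follows; alloc_q_bitmap and find_free_q are illustrative names, and the kernel would use its own bitmap helpers (bitmap_zalloc(), find_first_zero_bit()) instead.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *alloc_q_bitmap(unsigned int nqueues)
{
	size_t nlongs = (nqueues + LONG_BITS - 1) / LONG_BITS;
	return calloc(nlongs, sizeof(unsigned long));
}

/* Return the first unused queue index, or -1 if all are taken. */
static int find_free_q(const unsigned long *map, unsigned int nqueues)
{
	for (unsigned int q = 0; q < nqueues; q++)
		if (!(map[q / LONG_BITS] & (1UL << (q % LONG_BITS))))
			return (int)q;
	return -1;
}

int main(void)
{
	unsigned int max_pf_txqs = 768; /* discovered from HW caps at runtime */
	unsigned long *avail_txqs = alloc_q_bitmap(max_pf_txqs);
	int q;

	if (!avail_txqs)
		return 1;
	q = find_free_q(avail_txqs, max_pf_txqs);
	if (q >= 0)
		avail_txqs[q / LONG_BITS] |= 1UL << (q % LONG_BITS);
	printf("claimed queue %d of %u\n", q, max_pf_txqs);
	free(avail_txqs);
	return 0;
}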
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 765e3c2ed045..bf9aa533a7c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1610,6 +1610,7 @@ enum ice_aq_err {
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
};
/* Admin Queue command opcodes */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2e0731c1e1a3..302ad981129c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -263,21 +263,23 @@ enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
- struct ice_link_status *hw_link_info_old, *hw_link_info;
struct ice_aqc_get_link_status_data link_data = { 0 };
struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
u16 cmd_flags;
if (!pi)
return ICE_ERR_PARAM;
- hw_link_info_old = &pi->phy.link_info_old;
+ hw = pi->hw;
+ li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
- hw_link_info = &pi->phy.link_info;
+ li = &pi->phy.link_info;
hw_fc_info = &pi->fc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
@@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
- cd);
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
if (status)
return status;
/* save off old link status information */
- *hw_link_info_old = *hw_link_info;
+ *li_old = *li;
/* update current link status information */
- hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
- hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
- hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
- hw_link_info->link_info = link_data.link_info;
- hw_link_info->an_info = link_data.an_info;
- hw_link_info->ext_info = link_data.ext_info;
- hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
- hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
- hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
- hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
+ li->link_info = link_data.link_info;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+ ICE_AQ_CFG_PACING_TYPE_M);
/* update fc info */
tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
@@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
else
hw_fc_info->current_mode = ICE_FC_NONE;
- hw_link_info->lse_ena =
- !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+ li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+
+ ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)li->phy_type_low);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)li->phy_type_high);
+ ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
- *link = *hw_link_info;
+ *link = *li;
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
@@ -740,7 +754,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_get_itr_intrl_gran(hw);
- status = ice_init_all_ctrlq(hw);
+ status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -855,7 +869,7 @@ err_unroll_sched:
err_unroll_alloc:
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
return status;
}
@@ -881,7 +895,7 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
ice_clear_all_vsi_ctx(hw);
@@ -1078,6 +1092,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
{ 0 }
};
@@ -1088,7 +1103,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1096,6 +1112,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -1993,6 +2014,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
+ cfg->low_power_ctrl);
+ ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
@@ -2024,7 +2056,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (!status)
memcpy(li->module_type, &pcaps->module_type,
@@ -2174,27 +2206,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
switch (fec) {
case ICE_FEC_BASER:
- /* Clear auto FEC and RS bits, and AND BASE-R ability
+ /* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
- /* Clear auto FEC and BASE-R bits, and AND RS ability
+ /* Clear BASE-R bits, and AND RS ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
- /* Clear auto FEC and all FEC option bits. */
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ /* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_AUTO:
@@ -3240,40 +3269,44 @@ void ice_replay_post(struct ice_hw *hw)
/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**
* ice_stat_update32 - read 32 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
@@ -3287,17 +3320,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
new_data = rd32(hw, reg);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d1f8353fe6bb..e376d1eadba4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
@@ -123,8 +125,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
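Note: the reworked ice_stat_update40()/ice_stat_update32() above accumulate deltas rather than overwriting the counter, and they handle the hardware counter wrapping at 40 (or 32) bits. The wrap handling reduces to the arithmetic in this standalone sketch; names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define COUNTER_BITS 40
#define COUNTER_MAX  (1ULL << COUNTER_BITS)

/* Delta between two reads of a free-running 40-bit hardware counter. */
static uint64_t delta40(uint64_t prev, uint64_t now)
{
	return now >= prev ? now - prev : (now + COUNTER_MAX) - prev;
}

int main(void)
{
	uint64_t sw_stat = 0;

	/* normal case: counter moved forward by 150 */
	sw_stat += delta40(100, 250);
	/* wrap case: counter rolled past 2^40 back to a small value (+15) */
	sw_stat += delta40(COUNTER_MAX - 5, 10);

	printf("accumulated = %llu\n", (unsigned long long)sw_stat); /* 165 */
	return 0;
}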
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e91ac4df0242..2353166c654e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @cq: pointer to the specific Control queue
*
* This is the main initialization routine for the Control Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->sq_buf_size
@@ -369,7 +369,7 @@ init_ctrlq_exit:
* @cq: pointer to the specific Control queue
*
* The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
* - cq->rq_buf_size
@@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
return status;
}
@@ -585,12 +579,14 @@ init_ctrlq_free_rq:
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
!cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG;
}
- mutex_init(&cq->sq_lock);
- mutex_init(&cq->rq_lock);
/* setup SQ command write back timeout */
cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
@@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/* allocate the ATQ */
ret_code = ice_init_sq(hw, cq);
if (ret_code)
- goto init_ctrlq_destroy_locks;
+ return ret_code;
/* allocate the ARQ */
ret_code = ice_init_rq(hw, cq);
@@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
-init_ctrlq_destroy_locks:
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
return ret_code;
}
@@ -647,12 +638,14 @@ init_ctrlq_destroy_locks:
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
@@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_init(&cq->sq_lock);
+ mutex_init(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return;
}
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
}
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
*/
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
@@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
+/**
* ice_clean_sq - cleans Admin send queue (ATQ)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
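Note: the ice_controlq.c changes above split lock lifetime from queue lifetime: ice_create_all_ctrlq()/ice_destroy_all_ctrlq() run once at load/unload and own the mutexes, while ice_init_all_ctrlq()/ice_shutdown_all_ctrlq() may run repeatedly across resets without re-initializing them. A rough pthreads sketch of that pairing; the function names below are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sq_lock;  /* lives for the whole driver lifetime */
static int queues_ready;         /* torn down and re-created on every reset */

static void create_ctrlq(void)   { pthread_mutex_init(&sq_lock, NULL); queues_ready = 1; }
static void shutdown_ctrlq(void) { queues_ready = 0; }  /* reset path: locks untouched */
static void reinit_ctrlq(void)   { queues_ready = 1; }  /* reset path: locks untouched */
static void destroy_ctrlq(void)  { queues_ready = 0; pthread_mutex_destroy(&sq_lock); }

int main(void)
{
	create_ctrlq();    /* driver load: init locks + queues once        */
	shutdown_ctrlq();  /* reset event: tear down queues only           */
	reinit_ctrlq();    /* reset done: bring queues back, reuse locks   */
	destroy_ctrlq();   /* driver unload: free queues and locks         */
	printf("queues_ready=%d\n", queues_ready);
	return 0;
}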
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index c2002ded65f6..d60c942249e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -954,7 +954,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw)
pi->dcbx_status = ice_get_dcbx_status(hw);
if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index fe88b127ca42..d9578919aad8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -204,15 +204,86 @@ out:
}
/**
+ * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config
+ * @pi: port information structure
+ */
+static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+ u8 i;
+
+ /* Ensure ETS recommended DCB configuration is not already set */
+ if (dcbcfg->etsrec.maxtcs)
+ return;
+
+ /* In CEE mode, set the default to 1 TC */
+ dcbcfg->etsrec.maxtcs = 1;
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
+ dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
+ ICE_IEEE_TSA_ETS;
+ }
+}
+
+/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool
+ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prio_table,
+ &old_cfg->etscfg.prio_table,
+ sizeof(new_cfg->etscfg.prio_table))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
* ice_dcb_rebuild - rebuild DCB post reset
* @pf: physical function instance
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
+ struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- struct ice_dcbx_cfg *prev_cfg;
enum ice_status ret;
- u8 willing;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -224,9 +295,15 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
+ local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
/* Save current willing state and force FW to unwilling */
- willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ local_dcbx_cfg->etscfg.willing = 0x0;
+ local_dcbx_cfg->pfc.willing = 0x0;
+ local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
@@ -234,8 +311,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
/* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = devm_kmemdup(&pf->pdev->dev,
- &pf->hw.port_info->local_dcbx_cfg,
+ prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg) {
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
@@ -243,22 +319,29 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
ice_init_dcb(&pf->hw);
- if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
- sizeof(*prev_cfg))) {
+ if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ pf->hw.port_info->is_sw_lldp = true;
+ else
+ pf->hw.port_info->is_sw_lldp = false;
+
+ if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
- devm_kfree(&pf->pdev->dev, prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
devm_kfree(&pf->pdev->dev, prev_cfg);
- /* Configuration replayed - reset willing state to previous */
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ /* Set the local desired config */
+ if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
+ memcpy(local_dcbx_cfg, desired_dcbx_cfg,
+ sizeof(*local_dcbx_cfg));
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ dev_err(&pf->pdev->dev, "Failed to set desired config\n");
goto dcb_error;
}
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
@@ -364,35 +447,17 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
- int sw_default = 0;
int err;
port_info = hw->port_info;
err = ice_init_dcb(hw);
if (err) {
- /* FW LLDP is not active, default to SW DCBX/LLDP */
- dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
- hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
- hw->port_info->is_sw_lldp = true;
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- dev_info(&pf->pdev->dev, "DCBX disabled\n");
-
- /* LLDP disabled in FW */
- if (port_info->is_sw_lldp) {
- sw_default = 1;
- dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
- clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- } else {
- set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
- dev_info(&pf->pdev->dev, "DCBX not started\n");
-
- if (sw_default) {
+ /* FW LLDP is disabled, activate SW DCBX/LLDP mode */
+ dev_info(&pf->pdev->dev,
+ "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+ port_info->is_sw_lldp = true;
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
dev_err(&pf->pdev->dev,
@@ -407,6 +472,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
return 0;
}
+ port_info->is_sw_lldp = false;
+ set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
@@ -432,30 +500,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
- u8 pf_id = hw->pf_id;
+ u8 port;
int i;
+ port = hw->port_info->lport;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
for (i = 0; i < 8; i++) {
- ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_rx[i],
&cur_ps->priority_xoff_rx[i]);
- ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXONRXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_rx[i],
&cur_ps->priority_xon_rx[i]);
- ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXONTXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_tx[i],
&cur_ps->priority_xon_tx[i]);
- ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_tx[i],
&cur_ps->priority_xoff_tx[i]);
- ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+ ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_2_xoff[i],
&cur_ps->priority_xon_2_xoff[i]);
@@ -502,55 +571,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
}
/**
- * ice_dcb_need_recfg - Check if DCB needs reconfig
- * @pf: board private structure
- * @old_cfg: current DCB config
- * @new_cfg: new DCB config
- */
-static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
- struct ice_dcbx_cfg *new_cfg)
-{
- bool need_reconfig = false;
-
- /* Check if ETS configuration has changed */
- if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
- sizeof(new_cfg->etscfg))) {
- /* If Priority Table has changed reconfig is needed */
- if (memcmp(&new_cfg->etscfg.prio_table,
- &old_cfg->etscfg.prio_table,
- sizeof(new_cfg->etscfg.prio_table))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
- }
-
- if (memcmp(&new_cfg->etscfg.tcbwtable,
- &old_cfg->etscfg.tcbwtable,
- sizeof(new_cfg->etscfg.tcbwtable)))
- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
-
- if (memcmp(&new_cfg->etscfg.tsatable,
- &old_cfg->etscfg.tsatable,
- sizeof(new_cfg->etscfg.tsatable)))
- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
- }
-
- /* Check if PFC configuration has changed */
- if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
- }
-
- /* Check if APP Table has changed */
- if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
- }
-
- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
- return need_reconfig;
-}
-
-/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 52083a63dee6..f7dd0bd03d39 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -155,7 +155,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
- ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
+ ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1201,8 +1201,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
- if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
enum ice_status status;
/* Disable FW LLDP engine */
@@ -1319,14 +1319,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
- unsigned int j = 0;
+ unsigned int j;
int i = 0;
char *p;
+ ice_update_pf_stats(pf);
+ ice_update_vsi_stats(vsi);
+
for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
/* populate per queue stats */
@@ -1716,6 +1719,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_info *pi = np->vsi->port_info;
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
@@ -2040,6 +2044,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
break;
}
ks->base.duplex = DUPLEX_FULL;
+
+ if (link_info->an_info & ICE_AQ_AN_COMPLETED)
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Autoneg);
+
+ /* Set flow control negotiated Rx/Tx pause */
+ switch (pi->fc.current_mode) {
+ case ICE_FC_FULL:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ break;
+ case ICE_FC_TX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_RX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_PFC:
+ /* fall through */
+ default:
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ }
}
/**
@@ -2078,9 +2109,12 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ int err = 0;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
hw_link_info = &vsi->port_info->phy.link_info;
/* set speed and duplex */
@@ -2125,48 +2159,36 @@ ice_get_link_ksettings(struct net_device *netdev,
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
- switch (vsi->port_info->fc.req_mode) {
- case ICE_FC_FULL:
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set the advertised flow control based on the PHY capability */
+ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
+ (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
- break;
- case ICE_FC_TX_PAUSE:
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_RX_PAUSE:
+ } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_PFC:
- default:
+ } else {
ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, advertising,
Asym_Pause);
- break;
}
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
- if (!caps)
- goto done;
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
- /* Set supported FEC modes based on PHY capability */
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
-
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
- caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
/* Set advertised FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
@@ -2178,9 +2200,25 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set supported FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
done:
devm_kfree(&vsi->back->pdev->dev, caps);
- return 0;
+ return err;
}
/**
@@ -2763,6 +2801,11 @@ static int ice_nway_reset(struct net_device *netdev)
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
* @pause: ethernet pause (flow control) parameters
+ *
+ * Get requested flow control status from PHY capability.
+ * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
+ * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
+ * the negotiated Rx/Tx pause via lp_advertising.
*/
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -2816,6 +2859,7 @@ static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_dcbx_cfg *dcbx_cfg;
@@ -2826,6 +2870,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
u8 aq_failures;
bool link_up;
int err = 0;
+ u32 is_an;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -2840,7 +2885,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EOPNOTSUPP;
}
- if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+ /* Get pause param reports the configured and negotiated flow control
+ * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
+ * is defined, get pause param reports the SW configured setting in
+ * pause->autoneg, so compare pause->autoneg with the SW configured value
+ * to prevent the user from using set pause param to change autoneg.
+ */
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ kfree(pcaps);
+ return -EIO;
+ }
+
+ is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ kfree(pcaps);
+
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6c5ce05742b1..6f78ff5534af 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -127,8 +127,11 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S 5
+#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
@@ -281,14 +284,10 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
@@ -296,38 +295,22 @@
#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
@@ -340,32 +323,23 @@
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
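The new GLINT_DYN_CTL field masks above (ITR index, interval, and write-back on ITR) describe how a single doorbell value selects an ITR and requests descriptor write-back when that ITR expires. The sketch below only illustrates the field layout and is not the driver's actual WB_ON_ITR routine; it assumes ICE_M(m, s) expands to ((m) << (s)) as elsewhere in the driver, and the function name is made up.

/* Illustrative only: build a GLINT_DYN_CTL value that programs an ITR
 * index and interval and asks hardware to write back completed
 * descriptors when the ITR expires instead of raising an interrupt.
 */
static void ice_example_arm_wb_on_itr(struct ice_hw *hw, u16 vec_idx,
				       u8 itr_idx, u16 interval)
{
	u32 val = GLINT_DYN_CTL_WB_ON_ITR_M |
		  ((itr_idx << GLINT_DYN_CTL_ITR_INDX_S) &
		   GLINT_DYN_CTL_ITR_INDX_M) |
		  ((interval << GLINT_DYN_CTL_INTERVAL_S) &
		   GLINT_DYN_CTL_INTERVAL_M);

	wr32(hw, GLINT_DYN_CTL(vec_idx), val);
}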
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 510a8c900e61..57ea6811fe2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -290,6 +290,7 @@ struct ice_rlan_ctx {
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
struct ice_ctx_ele {
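The prefena member added to ice_rlan_ctx above means Rx queue context setup now has to set the prefetch-enable bit explicitly, since the cleared context structure leaves it at 0. A hedged fragment of what that looks like during Rx ring initialization; the helper name is made up and the surrounding field setup is abbreviated.

static void ice_example_init_rxq_ctx(struct ice_rlan_ctx *rlan_ctx)
{
	/* The context arrives zeroed, so descriptor prefetching stays
	 * disabled unless this bit is set before the context is
	 * written to hardware.
	 */
	rlan_ctx->prefena = 1;

	/* base, qlen, dbuf, dtype and friends are filled here in the
	 * real setup path before the context is committed to HW.
	 */
}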
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a19f5920733b..a39767e8c2a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -191,41 +191,58 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
}
/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
*/
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
+ int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int i, ret = 0;
+ int ret = 0;
+ u32 rx_reg;
- for (i = 0; i < vsi->num_rxq; i++) {
- int pf_q = vsi->rxq_map[i];
- u32 rx_reg;
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- continue;
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+ if (ret)
break;
- }
}
return ret;
@@ -246,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
- goto err_txrings;
+ return -ENOMEM;
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
- goto err_rxrings;
+ goto err_rings;
+
+ vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(*vsi->txq_map), GFP_KERNEL);
+
+ if (!vsi->txq_map)
+ goto err_txq_map;
+
+ vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(*vsi->rxq_map), GFP_KERNEL);
+ if (!vsi->rxq_map)
+ goto err_rxq_map;
+
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
@@ -266,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
return 0;
err_vectors:
+ devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+err_rxq_map:
+ devm_kfree(&pf->pdev->dev, vsi->txq_map);
+err_txq_map:
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
+err_rings:
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
return -ENOMEM;
}
@@ -416,6 +448,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
vsi->rx_rings = NULL;
}
+ if (vsi->txq_map) {
+ devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ vsi->txq_map = NULL;
+ }
+ if (vsi->rxq_map) {
+ devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ vsi->rxq_map = NULL;
+ }
}
/**
@@ -647,7 +687,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
struct ice_qs_cfg tx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_txqs,
- .pf_map_size = ICE_MAX_TXQS,
+ .pf_map_size = pf->max_pf_txqs,
.q_count = vsi->alloc_txq,
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
@@ -657,7 +697,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_rxqs,
- .pf_map_size = ICE_MAX_RXQS,
+ .pf_map_size = pf->max_pf_rxqs,
.q_count = vsi->alloc_rxq,
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
@@ -1010,6 +1050,13 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
}
+ /* Allow control frames out of main VSI */
+ if (vsi->type == ICE_VSI_PF) {
+ ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -1129,12 +1176,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return -EEXIST;
}
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- num_q_vectors = vsi->num_q_vectors;
- } else {
- err = -EINVAL;
- goto err_out;
- }
+ num_q_vectors = vsi->num_q_vectors;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
@@ -1180,9 +1222,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
return -EEXIST;
}
- if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return -ENOENT;
-
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
@@ -1477,40 +1516,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
- ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_bytes,
- &cur_es->rx_bytes);
+ ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_bytes, &cur_es->rx_bytes);
- ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_unicast,
- &cur_es->rx_unicast);
+ ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_unicast, &cur_es->rx_unicast);
- ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_multicast,
- &cur_es->rx_multicast);
+ ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_multicast, &cur_es->rx_multicast);
- ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
- &cur_es->rx_broadcast);
+ ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_broadcast, &cur_es->rx_broadcast);
ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_discards, &cur_es->rx_discards);
- ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_bytes,
- &cur_es->tx_bytes);
+ ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_bytes, &cur_es->tx_bytes);
- ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_unicast,
- &cur_es->tx_unicast);
+ ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_unicast, &cur_es->tx_unicast);
- ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_multicast,
- &cur_es->tx_multicast);
+ ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_multicast, &cur_es->tx_multicast);
- ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
- &cur_es->tx_broadcast);
+ ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_broadcast, &cur_es->tx_broadcast);
ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_errors, &cur_es->tx_errors);
@@ -1658,6 +1689,62 @@ setup_rings:
}
/**
+ * ice_vsi_cfg_txq - Configure a single Tx queue
+ * @vsi: the VSI that the queue belongs to
+ * @ring: Tx ring to be configured
+ * @tc_q_idx: queue index within the given TC
+ * @qg_buf: queue group buffer
+ * @tc: TC that the Tx ring belongs to
+ */
+static int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
+ struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init the queue-specific tail register. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ /* Add unique software queue handle of the Tx queue per
+ * TC into the VSI Tx ring
+ */
+ ring->q_handle = tc_q_idx;
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
@@ -1670,20 +1757,16 @@ static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
- u8 num_q_grps, q_idx = 0;
- enum ice_status status;
- u16 buf_len, i, pf_q;
- int err = 0, tc;
+ u16 q_idx = 0, i;
+ int err = 0;
+ u8 tc;
- buf_len = sizeof(*qg_buf);
- qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+ qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
ice_for_each_traffic_class(tc) {
@@ -1691,39 +1774,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
-
- pf_q = vsi->txq_map[q_idx + offset];
- ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- rings[q_idx]->tail =
- pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- i, num_q_grps, qg_buf,
- buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
+ qg_buf, tc);
+ if (err)
goto err_cfg_txqs;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- rings[q_idx]->txq_teid =
- le32_to_cpu(txq->q_teid);
q_idx++;
}
@@ -2070,45 +2124,112 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
}
/**
- * ice_vsi_stop_tx_rings - Disable Tx rings
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
- * @rings: Tx ring array to be stopped
- * @offset: offset within vsi->txq_map
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Meta data of Tx ring to be stopped
*/
-static int
-ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, int offset)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- int tc, q_idx = 0, err = 0;
- u16 *q_ids, *q_handles, i;
enum ice_status status;
- u32 *q_teids, val;
+ u32 val;
- if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
- return -EINVAL;
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
- q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
- GFP_KERNEL);
- if (!q_teids)
- return -ENOMEM;
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
- q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
- GFP_KERNEL);
- if (!q_ids) {
- err = -ENOMEM;
- goto err_alloc_q_ids;
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
}
- q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
- sizeof(*q_handles), GFP_KERNEL);
- if (!q_handles) {
- err = -ENOMEM;
- goto err_alloc_q_handles;
- }
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that will contain all the necessary fields that
+ * are needed for stopping Tx queue
+ */
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc = 0;
+
+#ifdef CONFIG_DCB
+ tc = ring->dcb_tc;
+#endif /* CONFIG_DCB */
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @rings: Tx ring array to be stopped
+ */
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring **rings)
+{
+ u16 i, q_idx = 0;
+ int status;
+ u8 tc;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
/* set up the Tx queue list to be disabled for each enabled TC */
ice_for_each_traffic_class(tc) {
@@ -2116,64 +2237,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_q_vector *q_vector;
-
- if (!rings || !rings[q_idx]) {
- err = -EINVAL;
- goto err_out;
- }
+ struct ice_txq_meta txq_meta = { };
- q_ids[i] = vsi->txq_map[q_idx + offset];
- q_teids[i] = rings[q_idx]->txq_teid;
- q_handles[i] = i;
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src,
+ rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = rings[i]->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
+ if (status)
+ return status;
q_idx++;
}
- status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
- vsi->num_txq, q_handles, q_ids,
- q_teids, rst_src, rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an active
- * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
- * an error as the reset operation disables queues at the
- * hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&pf->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
- }
}
-err_out:
- devm_kfree(&pf->pdev->dev, q_handles);
-
-err_alloc_q_handles:
- devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
- devm_kfree(&pf->pdev->dev, q_teids);
-
- return err;
+ return 0;
}
/**
@@ -2186,8 +2267,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
- 0);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}
/**
@@ -2519,7 +2599,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2547,7 +2627,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_cfg_sw_lldp(vsi, true, true);
/* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
@@ -2610,39 +2690,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
+ int i;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- if (!vsi->q_vectors || !vsi->irqs_ready)
- return;
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
- ice_vsi_release_msix(vsi);
- if (vsi->type == ICE_VSI_VF)
- return;
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
- vsi->irqs_ready = false;
- ice_for_each_q_vector(vsi, i) {
- u16 vector = i + base;
- int irq_num;
+ vsi->irqs_ready = false;
+ ice_for_each_q_vector(vsi, i) {
+ u16 vector = i + base;
+ int irq_num;
- irq_num = pf->msix_entries[vector].vector;
+ irq_num = pf->msix_entries[vector].vector;
- /* free only the irqs that were actually requested */
- if (!vsi->q_vectors[i] ||
- !(vsi->q_vectors[i]->num_ring_tx ||
- vsi->q_vectors[i]->num_ring_rx))
- continue;
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
- /* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
- }
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
}
}
@@ -2821,15 +2898,17 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- ice_for_each_q_vector(vsi, i)
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- ice_flush(hw);
+ ice_flush(hw);
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(pf->msix_entries[i + base].vector);
- }
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(pf->msix_entries[i + base].vector);
}
/**
@@ -2895,7 +2974,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
@@ -2962,6 +3041,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
ice_dev_onetime_setup(&pf->hw);
@@ -2969,6 +3049,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+
+ ret = ice_vsi_alloc_arrays(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ice_vsi_get_qs(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2976,9 +3062,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -2986,6 +3069,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3007,10 +3094,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3028,7 +3111,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -3145,7 +3228,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (ena_tc & BIT(i))
num_tc++;
/* populate max_txqs per TC */
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3188,3 +3271,33 @@ out:
return ret;
}
#endif /* CONFIG_DCB */
+
+/**
+ * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
+ * @vsi: the VSI to configure the MAC filter on
+ * @macaddr: the MAC address to be added or removed
+ * @set: true to add the MAC filter, false to delete it
+ *
+ * Adds or removes a MAC address filter entry for the given VSI
+ */
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+{
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ /* Update MAC filter list to be added or removed for a VSI */
+ if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
+ status = ICE_ERR_NO_MEMORY;
+ goto cfg_mac_fltr_exit;
+ }
+
+ if (set)
+ status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
+ else
+ status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
+
+cfg_mac_fltr_exit:
+ ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ return status;
+}
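ice_vsi_ctrl_rx_ring and ice_vsi_stop_tx_ring lose their static qualifier above only when CONFIG_PCI_IOV is set, which suggests the SR-IOV paths are the intended external callers. A minimal sketch of per-queue Rx control from such a caller, assuming the caller knows how many Rx queues the VF VSI owns; the function name and loop bounds are illustrative, not the driver's actual VF code.

static int ice_example_ena_vf_rx_queues(struct ice_vsi *vsi, u16 num_rxq)
{
	u16 q;
	int err;

	/* Enable each Rx queue individually so a timeout can be
	 * reported (and handled) per queue rather than per VSI.
	 */
	for (q = 0; q < num_rxq; q++) {
		err = ice_vsi_ctrl_rx_ring(vsi, true, q);
		if (err)
			return err;
	}

	return 0;
}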
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e43ef03bfc3..7faf8db844f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,8 +6,22 @@
#include "ice.h"
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr);
+struct ice_txq_meta {
+ /* Tx-scheduler element identifier */
+ u32 q_teid;
+ /* Entry in VSI's txq_map bitmap */
+ u16 q_id;
+ /* Relative index of Tx queue within TC */
+ u16 q_handle;
+ /* VSI index that Tx queue belongs to */
+ u16 vsi_idx;
+ /* TC number that Tx queue belongs to */
+ u8 tc;
+};
+
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr);
void ice_free_fltr_list(struct device *dev, struct list_head *h);
@@ -25,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+
+void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
#endif /* CONFIG_PCI_IOV */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
@@ -95,4 +119,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+
#endif /* !_ICE_LIB_H_ */
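The ice_txq_meta structure and the prototypes above give SR-IOV code a way to stop one Tx queue at a time: snapshot the queue identifiers with ice_fill_txq_meta, then pass them to ice_vsi_stop_tx_ring. A hedged sketch of that calling pattern; the wrapper name is made up and ICE_NO_RESET is assumed to be the non-reset member of enum ice_disq_rst_src.

static int ice_example_stop_one_txq(struct ice_vsi *vsi,
				    struct ice_ring *ring, u16 rel_vmvf_num)
{
	struct ice_txq_meta txq_meta = { };

	/* Collect q_id, TEID, handle, VSI index and TC for this ring */
	ice_fill_txq_meta(vsi, ring, &txq_meta);

	return ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, rel_vmvf_num,
				    ring, &txq_meta);
}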
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 63db08d9bafa..f029aee32913 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -9,7 +9,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.4-k"
+#define DRV_VERSION "0.7.5-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -34,19 +34,17 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
-static void ice_update_vsi_stats(struct ice_vsi *vsi);
-static void ice_update_pf_stats(struct ice_pf *pf);
/**
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u32 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_ring *ring)
{
- u32 head, tail;
+ u16 head, tail;
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -118,10 +116,9 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- LIST_HEAD(tmp_add_list);
+ enum ice_status status;
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- int status;
vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
if (!vsi)
@@ -132,8 +129,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
*/
/* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vsi->port_info->mac.perm_addr);
+ status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
if (status)
goto unregister;
@@ -141,18 +137,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
* MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
if (status)
- goto free_mac_list;
-
- /* Program MAC filters for entries in tmp_add_list */
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status)
- status = -ENOMEM;
-
-free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ goto unregister;
+ return 0;
unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
@@ -166,7 +155,7 @@ unregister:
vsi->netdev = NULL;
}
- return status;
+ return -EIO;
}
/**
@@ -447,13 +436,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- if (!locked) {
+ if (!locked)
rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+
+ ice_stop(vsi->netdev);
+
+ if (!locked)
rtnl_unlock();
- } else {
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- }
} else {
ice_vsi_close(vsi);
}
@@ -488,6 +477,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -497,6 +487,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
+ /* Disable VFs until reset is completed */
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_set_vf_state_qs_dis(&pf->vf[i]);
+
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
@@ -810,6 +804,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (!vsi || !vsi->port_info)
return -EINVAL;
+ /* turn off PHY if media was removed */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
+ !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ result = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (result) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to set link down, VSI %d error %d\n",
+ vsi->vsi_num, result);
+ return result;
+ }
+ }
+
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -1307,14 +1315,134 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (vf_mdd_detected) {
vf->num_mdd_events++;
- if (vf->num_mdd_events > 1)
- dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
+ if (vf->num_mdd_events &&
+ vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
+ dev_info(&pf->pdev->dev,
+ "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
}
/**
+ * ice_force_phys_link_state - Force the physical link state
+ * @vsi: VSI to force the physical link state to up/down
+ * @link_up: true/false indicates to set the physical link to up/down
+ *
+ * Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes, a link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ struct device *dev;
+ int retcode;
+
+ if (!vsi || !vsi->port_info || !vsi->back)
+ return -EINVAL;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ dev = &vsi->back->pdev->dev;
+
+ pi = vsi->port_info;
+
+ pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (retcode) {
+ dev_err(dev,
+ "Failed to get phy capabilities, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ goto out;
+ }
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ retcode = -ENOMEM;
+ goto out;
+ }
+
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg->low_power_ctrl = pcaps->low_power_ctrl;
+ cfg->eee_cap = pcaps->eee_cap;
+ cfg->eeer_value = pcaps->eeer_value;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ if (retcode) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ }
+
+ devm_kfree(dev, cfg);
+out:
+ devm_kfree(dev, pcaps);
+ return retcode;
+}
+
+/**
+ * ice_check_media_subtask - Check for media; bring link up if detected.
+ * @pf: pointer to PF struct
+ */
+static void ice_check_media_subtask(struct ice_pf *pf)
+{
+ struct ice_port_info *pi;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi)
+ return;
+
+ /* No need to check for media if it's already present or the interface
+ * is down
+ */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
+ test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ /* Refresh link info and check if media is present */
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
+ if (err)
+ return;
+
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err)
+ return;
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1336,6 +1464,7 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
@@ -1369,8 +1498,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
- hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
@@ -1409,15 +1538,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
*/
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- ice_for_each_q_vector(vsi, i)
- ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
- }
+ ice_for_each_q_vector(vsi, i)
+ ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
ice_flush(hw);
return 0;
@@ -1665,7 +1790,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(hw);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+ if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf);
@@ -2082,16 +2207,25 @@ static void ice_deinit_pf(struct ice_pf *pf)
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
+
+ if (pf->avail_txqs) {
+ bitmap_free(pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ }
+
+ if (pf->avail_rxqs) {
+ bitmap_free(pf->avail_rxqs);
+ pf->avail_rxqs = NULL;
+ }
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
*/
-static void ice_init_pf(struct ice_pf *pf)
+static int ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
- set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw;
@@ -2105,12 +2239,6 @@ static void ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
- /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
- mutex_lock(&pf->avail_q_mutex);
- bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
- bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
- mutex_unlock(&pf->avail_q_mutex);
-
if (pf->hw.func_caps.common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2119,6 +2247,22 @@ static void ice_init_pf(struct ice_pf *pf)
pf->serv_tmr_period = HZ;
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
+
+ pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
+ pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
+
+ pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->avail_txqs)
+ return -ENOMEM;
+
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ if (!pf->avail_rxqs) {
+ devm_kfree(&pf->pdev->dev, pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
}
/**
@@ -2137,13 +2281,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* reserve one vector for miscellaneous handler */
needed = 1;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
v_budget += needed;
v_left -= needed;
/* reserve vectors for LAN traffic */
- pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
- v_budget += pf->num_lan_msix;
- v_left -= pf->num_lan_msix;
+ needed = min_t(int, num_online_cpus(), v_left);
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_lan_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2168,18 +2317,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(&pf->pdev->dev,
- "not enough vectors. requested = %d, obtained = %d\n",
+ "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
- if (v_actual >= (pf->num_lan_msix + 1)) {
- pf->num_avail_sw_msix = v_actual -
- (pf->num_lan_msix + 1);
- } else if (v_actual >= 2) {
- pf->num_lan_msix = 1;
- pf->num_avail_sw_msix = v_actual - 2;
- } else {
+/* 2 vectors for LAN (traffic + OICR) */
+#define ICE_MIN_LAN_VECS 2
+
+ if (v_actual < ICE_MIN_LAN_VECS) {
+ /* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
+ } else {
+ pf->num_lan_msix = ICE_MIN_LAN_VECS;
}
}
@@ -2189,9 +2338,13 @@ msix_err:
devm_kfree(&pf->pdev->dev, pf->msix_entries);
goto exit_err;
+no_hw_vecs_left_err:
+ dev_err(&pf->pdev->dev,
+ "not enough device MSI-X vectors. requested = %d, available = %d\n",
+ needed, v_left);
+ err = -ERANGE;
exit_err:
pf->num_lan_msix = 0;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err;
}
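The rewritten ice_ena_msix_range() above reserves one vector for the misc/OICR handler, gives LAN traffic up to num_online_cpus() vectors, and falls back to a two-vector minimum when the OS grants fewer than requested. A rough standalone sketch of that budgeting, using hypothetical names rather than the driver's fields:

/* Hypothetical illustration of the MSI-X budgeting; not part of the patch. */
#include <errno.h>

#define MIN_LAN_VECS	2	/* one traffic vector + one OICR vector */

static int budget_msix(int hw_left, int online_cpus, int os_granted,
		       int *lan_vecs)
{
	int budget = 1;			/* misc/OICR vector comes first */

	if (hw_left < 1)
		return -ERANGE;		/* no device vectors available */
	hw_left -= 1;

	/* LAN traffic gets one vector per online CPU, capped by what's left */
	*lan_vecs = online_cpus < hw_left ? online_cpus : hw_left;
	budget += *lan_vecs;

	if (os_granted >= budget)
		return budget;		/* full request satisfied */
	if (os_granted < MIN_LAN_VECS)
		return -ERANGE;		/* cannot even run minimally */

	*lan_vecs = MIN_LAN_VECS;	/* shrink to the minimum and carry on */
	return os_granted;
}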
@@ -2204,7 +2357,6 @@ static void ice_dis_msix(struct ice_pf *pf)
pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -2213,8 +2365,7 @@ static void ice_dis_msix(struct ice_pf *pf)
*/
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
+ ice_dis_msix(pf);
if (pf->irq_tracker) {
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
@@ -2230,10 +2381,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- vectors = ice_ena_msix_range(pf);
- else
- return -ENODEV;
+ vectors = ice_ena_msix_range(pf);
if (vectors < 0)
return vectors;
@@ -2349,7 +2497,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
- ice_init_pf(pf);
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
+ goto err_init_pf_unroll;
+ }
err = ice_init_pf_dcb(pf, false);
if (err) {
@@ -2390,12 +2542,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto err_init_interrupt_unroll;
}
/* create switch struct for the switch element created by FW on boot */
@@ -2483,9 +2633,9 @@ static void ice_remove(struct pci_dev *pdev)
continue;
ice_vsi_free_q_vectors(pf->vsi[i]);
}
- ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
+ ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
}
@@ -2711,10 +2861,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
enum ice_status status;
- LIST_HEAD(a_mac_list);
- LIST_HEAD(r_mac_list);
u8 flags = 0;
- int err;
+ int err = 0;
u8 *mac;
mac = (u8 *)addr->sa_data;
@@ -2737,42 +2885,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* When we change the MAC address we also have to change the MAC address
* based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of MAC
- * addresses (even though in this case, we have only 1 MAC address to be
- * added/removed) and this done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of MAC addresses is either to be
- * added or removed from the filter.
+ * and then create a new filter rule using ice_add_mac. Both the remove
+ * and the add are issued through the ice_vsi_cfg_mac_fltr() helper.
*/
- err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- status = ice_remove_mac(hw, &r_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
- err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- status = ice_add_mac(hw, &a_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
-free_lists:
- /* free list entries */
- ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
- ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
-
+err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
@@ -2788,8 +2917,8 @@ free_lists:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
- mac);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, status);
}
return 0;
}
@@ -3008,10 +3137,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_vsi_cfg_msix(vsi);
- else
- return -ENOTSUPP;
+ ice_vsi_cfg_msix(vsi);
/* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were
@@ -3132,7 +3258,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
* ice_update_vsi_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
-static void ice_update_vsi_stats(struct ice_vsi *vsi)
+void ice_update_vsi_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
struct ice_eth_stats *cur_es = &vsi->eth_stats;
@@ -3159,6 +3285,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
+ /* record drops from the port level */
+ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
}
@@ -3166,149 +3294,139 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
* ice_update_pf_stats - Update PF port stats counters
* @pf: PF whose stats needs to be updated
*/
-static void ice_update_pf_stats(struct ice_pf *pf)
+void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
- u8 pf_id;
+ u8 port;
+ port = hw->port_info->lport;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
- pf_id = hw->pf_id;
- ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
+ ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
- ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
+ ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_unicast,
&cur_ps->eth.rx_unicast);
- ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
+ ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_multicast,
&cur_ps->eth.rx_multicast);
- ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_broadcast,
&cur_ps->eth.rx_broadcast);
- ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
+ ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
+ &prev_ps->eth.rx_discards,
+ &cur_ps->eth.rx_discards);
+
+ ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_bytes,
&cur_ps->eth.tx_bytes);
- ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
+ ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_unicast,
&cur_ps->eth.tx_unicast);
- ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
+ ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_multicast,
&cur_ps->eth.tx_multicast);
- ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_broadcast,
&cur_ps->eth.tx_broadcast);
- ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
&prev_ps->tx_dropped_link_down,
&cur_ps->tx_dropped_link_down);
- ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_64,
- &cur_ps->rx_size_64);
+ ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_64, &cur_ps->rx_size_64);
- ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_127,
- &cur_ps->rx_size_127);
+ ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_127, &cur_ps->rx_size_127);
- ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_255,
- &cur_ps->rx_size_255);
+ ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_255, &cur_ps->rx_size_255);
- ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_511,
- &cur_ps->rx_size_511);
+ ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_511, &cur_ps->rx_size_511);
- ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
- GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
- ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
- GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
- ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
- GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_big, &cur_ps->rx_size_big);
- ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_64,
- &cur_ps->tx_size_64);
+ ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_64, &cur_ps->tx_size_64);
- ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_127,
- &cur_ps->tx_size_127);
+ ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_127, &cur_ps->tx_size_127);
- ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_255,
- &cur_ps->tx_size_255);
+ ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_255, &cur_ps->tx_size_255);
- ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_511,
- &cur_ps->tx_size_511);
+ ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_511, &cur_ps->tx_size_511);
- ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
- GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
- ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
- GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
- ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
- GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
- ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
- ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
- ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
- ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
ice_update_dcb_stats(pf);
- ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
- ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
&prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
- ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
&prev_ps->mac_local_faults,
&cur_ps->mac_local_faults);
- ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
- ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
&prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
- ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
- ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
&prev_ps->rx_fragments, &cur_ps->rx_fragments);
- ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
&prev_ps->rx_oversize, &cur_ps->rx_oversize);
- ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
pf->stat_prev_loaded = true;
@@ -3372,85 +3490,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
- * ice_force_phys_link_state - Force the physical link state
- * @vsi: VSI to force the physical link state to up/down
- * @link_up: true/false indicates to set the physical link to up/down
- *
- * Force the physical link state by getting the current PHY capabilities from
- * hardware and setting the PHY config based on the determined capabilities. If
- * link changes a link event will be triggered because both the Enable Automatic
- * Link Update and LESM Enable bits are set when setting the PHY capabilities.
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
-{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_aqc_set_phy_cfg_data *cfg;
- struct ice_port_info *pi;
- struct device *dev;
- int retcode;
-
- if (!vsi || !vsi->port_info || !vsi->back)
- return -EINVAL;
- if (vsi->type != ICE_VSI_PF)
- return 0;
-
- dev = &vsi->back->pdev->dev;
-
- pi = vsi->port_info;
-
- pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
- if (!pcaps)
- return -ENOMEM;
-
- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
- NULL);
- if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- goto out;
- }
-
- /* No change in link */
- if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
- link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
- goto out;
-
- cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- retcode = -ENOMEM;
- goto out;
- }
-
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
- if (link_up)
- cfg->caps |= ICE_AQ_PHY_ENA_LINK;
- else
- cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
-
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
- if (retcode) {
- dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- }
-
- devm_kfree(dev, cfg);
-out:
- devm_kfree(dev, pcaps);
- return retcode;
-}
-
-/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
@@ -3559,24 +3598,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_vsi_req_irq - Request IRQ from the OS
- * @vsi: The VSI IRQ is being requested for
- * @basename: name for the vector
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
-{
- struct ice_pf *pf = vsi->back;
- int err = -EINVAL;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- err = ice_vsi_req_irq_msix(vsi, basename);
-
- return err;
-}
-
-/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -3605,7 +3626,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = ice_vsi_req_irq(vsi, int_name);
+ err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
@@ -3669,23 +3690,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
int err = 0;
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
- return err;
+ return 0;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
- struct net_device *netd = vsi->netdev;
-
if (netif_running(vsi->netdev)) {
- if (locked) {
- err = netd->netdev_ops->ndo_open(netd);
- } else {
+ if (!locked)
rtnl_lock();
- err = netd->netdev_ops->ndo_open(netd);
+
+ err = ice_open(vsi->netdev);
+
+ if (!locked)
rtnl_unlock();
- }
- } else {
- err = ice_vsi_open(vsi);
}
}
@@ -3723,22 +3740,23 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
/* loop through pf->vsi array and reinit the VSI if found */
ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
int err;
- if (!pf->vsi[i])
+ if (!vsi)
continue;
- err = ice_vsi_rebuild(pf->vsi[i]);
+ err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
"VSI at index %d rebuild failed\n",
- pf->vsi[i]->idx);
+ vsi->idx);
return err;
}
dev_info(&pf->pdev->dev,
"VSI at index %d rebuilt. vsi_num = 0x%x\n",
- pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ vsi->idx, vsi->vsi_num);
}
return 0;
@@ -3756,25 +3774,27 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
/* loop through pf->vsi array and replay the VSI if found */
ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
continue;
- ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+ ret = ice_replay_vsi(hw, vsi->idx);
if (ret) {
dev_err(&pf->pdev->dev,
"VSI at index %d replay failed %d\n",
- pf->vsi[i]->idx, ret);
+ vsi->idx, ret);
return -EIO;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi() call above
*/
- pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+ vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
dev_info(&pf->pdev->dev,
"VSI at index %d filter replayed successfully - vsi_num %i\n",
- pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ vsi->idx, vsi->vsi_num);
}
/* Clean up replay filter after successful re-configuration */
@@ -3842,12 +3862,10 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* start misc vector */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "misc vector setup failed: %d\n", err);
- goto err_vsi_rebuild;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "misc vector setup failed: %d\n", err);
+ goto err_vsi_rebuild;
}
/* restart the VSIs that were rebuilt and running before the reset */
@@ -4244,9 +4262,7 @@ static void ice_tx_timeout(struct net_device *netdev)
head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -4295,6 +4311,7 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
int err;
if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
@@ -4304,13 +4321,33 @@ int ice_open(struct net_device *netdev)
netif_carrier_off(netdev);
- err = ice_force_phys_link_state(vsi, true);
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n", err);
+ netdev_err(netdev, "Failed to get link info, error %d\n",
+ err);
return err;
}
+ /* Set PHY if there is media, otherwise, turn off PHY */
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set physical link up, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ err = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (err) {
+ netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
+ vsi->vsi_num, err);
+ return err;
+ }
+ set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
+ }
+
err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2a232504379d..79d64f9ed609 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
- * @hw: pointer to the HW struct
+ * @pi: port information structure
* @parent: pointer the base node of the subtree
* @layer: layer number
*
* This function retrieves the first node of the given layer from the subtree
*/
static struct ice_sched_node *
-ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
- u8 layer)
+ice_sched_get_first_node(struct ice_port_info *pi,
+ struct ice_sched_node *parent, u8 layer)
{
- u8 i;
-
- if (layer < hw->sw_entry_point_layer)
- return NULL;
- for (i = 0; i < parent->num_children; i++) {
- struct ice_sched_node *node = parent->children[i];
-
- if (node) {
- if (node->tx_sched_layer == layer)
- return node;
- /* this recursion is intentional, and wouldn't
- * go more than 9 calls
- */
- return ice_sched_get_first_node(hw, node, layer);
- }
- }
- return NULL;
+ return pi->sib_head[parent->tc_num][layer];
}
/**
@@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
parent = node->parent;
/* root has no parent */
if (parent) {
- struct ice_sched_node *p, *tc_node;
+ struct ice_sched_node *p;
/* update the parent */
for (i = 0; i < parent->num_children; i++)
@@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
break;
}
- /* search for previous sibling that points to this node and
- * remove the reference
- */
- tc_node = ice_sched_get_tc_node(pi, node->tc_num);
- if (!tc_node) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Invalid TC number %d\n", node->tc_num);
- goto err_exit;
- }
- p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
+ p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
while (p) {
if (p->sibling == node) {
p->sibling = node->sibling;
@@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
}
p = p->sibling;
}
+
+ /* update the sibling head if head is getting removed */
+ if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
+ pi->sib_head[node->tc_num][node->tx_sched_layer] =
+ node->sibling;
}
-err_exit:
+
/* leaf nodes have no children */
if (node->children)
devm_kfree(ice_hw_to_dev(hw), node->children);
@@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
- prev = ice_sched_get_first_node(hw, tc_node, layer);
+ prev = ice_sched_get_first_node(pi, tc_node, layer);
if (prev && prev != new_node) {
while (prev->sibling)
prev = prev->sibling;
prev->sibling = new_node;
}
+ /* initialize the sibling head */
+ if (!pi->sib_head[tc_node->tc_num][layer])
+ pi->sib_head[tc_node->tc_num][layer] = new_node;
+
if (i == 0)
*first_node_teid = teid;
}
@@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
goto lan_q_exit;
/* get the first queue group node from VSI sub-tree */
- qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
@@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
u8 vsi_layer;
vsi_layer = ice_sched_get_vsi_layer(hw);
- node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
+ node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
/* Check whether it already exists */
while (node) {
@@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw, tc_node, (u8)i);
+ node = ice_sched_get_first_node(hw->port_info, tc_node,
+ (u8)i);
/* scan all the siblings */
while (node) {
if (node->num_children < hw->max_children[i])
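The ice_sched.c changes above replace the recursive first-node lookup with a per-(TC, layer) sibling-head table that is kept up to date as nodes are added and freed. A rough, self-contained sketch of that head-pointer bookkeeping, with generic types rather than the driver's scheduler structures:

/* Generic illustration of the sibling-head pattern; hypothetical types. */
struct sib_node {
	struct sib_node *sibling;	/* next node on the same layer */
};

struct sib_layer {
	struct sib_node *head;		/* first node of this layer */
};

/* Link a node at the tail of the layer; record it as head if it is first. */
static void sib_add(struct sib_layer *layer, struct sib_node *node)
{
	struct sib_node *prev = layer->head;

	node->sibling = NULL;
	if (!prev) {
		layer->head = node;
		return;
	}
	while (prev->sibling)
		prev = prev->sibling;
	prev->sibling = node;
}

/* Unlink a node; advance the head if the head itself is being removed. */
static void sib_remove(struct sib_layer *layer, struct sib_node *node)
{
	struct sib_node *p = layer->head;

	while (p) {
		if (p->sibling == node) {
			p->sibling = node->sibling;
			break;
		}
		p = p->sibling;
	}
	if (layer->head == node)
		layer->head = node->sibling;
}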
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8271fd651725..99cf527d2b1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2137,6 +2137,38 @@ out:
}
/**
+ * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
+ *
+ * Helper function to search for a unicast rule entry - this is to be used
+ * to remove a unicast MAC filter that is not shared with other VSIs on the
+ * PF switch.
+ *
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_info *f_info)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->fwd_id.hw_vsi_id ==
+ list_itr->fltr_info.fwd_id.hw_vsi_id &&
+ f_info->flag == list_itr->fltr_info.flag)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2153,15 +2185,39 @@ enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return ICE_ERR_PARAM;
+ rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+ u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
+
+ vsi_handle = list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ list_itr->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
+ /* Don't remove the unicast address that belongs to
+ * another VSI on the switch, since it is not being
+ * shared...
+ */
+ mutex_lock(rule_lock);
+ if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
+ return ICE_ERR_DOES_NOT_EXIST;
+ }
+ mutex_unlock(rule_lock);
+ }
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3c83230434b6..5bf5c179a738 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -377,18 +377,28 @@ err:
*/
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
+ u16 prev_ntu = rx_ring->next_to_use;
+
rx_ring->next_to_use = val;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = val;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
*/
- wmb();
- writel(val, rx_ring->tail);
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
}
/**
@@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns false if all allocations were successful, true if any fail
+ * Returns false if all allocations were successful, true if any fail. Returning
+ * true signals to the caller that we didn't replace cleaned_count buffers and
+ * there is more work to do.
+ *
+ * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
+ * buffers. Then bump tail at most one time. Grouping like this lets us avoid
+ * multiple tail writes per call.
*/
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
@@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buf[ntu];
do {
+ /* if we fail here, we have work remaining */
if (!ice_alloc_mapped_page(rx_ring, bi))
- goto no_bufs;
+ break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
@@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
- return false;
-
-no_bufs:
- if (rx_ring->next_to_use != ntu)
- ice_release_rx_desc(rx_ring, ntu);
-
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ return !!cleaned_count;
}
/**
@@ -599,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int truesize = ICE_RXBUF_2048;
#endif
+ if (!size)
+ return;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
rx_buf->page_offset, size, truesize);
@@ -654,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
+ if (!size)
+ return rx_buf;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
rx_buf->page_offset, size,
@@ -737,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- /* hand second half of page back to the ring */
+ if (!rx_buf)
+ return;
+
if (ice_can_reuse_rx_page(rx_buf)) {
+ /* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
@@ -990,7 +1005,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure;
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1002,13 +1017,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u8 rx_ptype;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure = failure ||
- ice_alloc_rx_bufs(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1030,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ /* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
- /* allocate (if needed) and populate skb */
+
if (skb)
ice_add_rx_frag(rx_buf, skb, size);
else
@@ -1040,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
- rx_buf->pagecnt_bias++;
+ if (rx_buf)
+ rx_buf->pagecnt_bias++;
break;
}
@@ -1085,6 +1095,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
}
+ /* return up to cleaned_count buffers to hardware */
+ failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+
/* update queue and vector specific stats */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.pkts += total_rx_pkts;
@@ -1351,6 +1364,23 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* When exiting WB_ON_ITR, set a low ITR value and trigger
+ * interrupts to expire right away in case we already have more
+ * work ready to go.
+ */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
+ itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
+ /* set target back to last user set value */
+ rx->target_itr = rx->itr_setting;
+ /* set current to what we just wrote and dynamic if needed */
+ rx->current_itr = ICE_WB_ON_ITR_USECS |
+ (rx->itr_setting & ICE_ITR_DYNAMIC);
+ /* allow normal interrupt flow to start */
+ q_vector->itr_countdown = 0;
+ return;
+ }
+
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
ice_update_itr(q_vector, rx);
@@ -1396,6 +1426,41 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
+ * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
+ * @vsi: pointer to the VSI structure
+ * @q_vector: q_vector to set WB_ON_ITR on
+ *
+ * We need to tell hardware to write back completed descriptors even when
+ * interrupts are disabled. Without WB_ON_ITR, descriptors are only written
+ * back once they fill a cache line, so descriptors sitting in a partially
+ * filled cache line may not be written back until the next interrupt.
+ *
+ * This sets the write-back frequency to 2 microseconds as that is the minimum
+ * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
+ * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ */
+static void
+ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ /* already in WB_ON_ITR mode no need to change it */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
+ return;
+
+ if (q_vector->num_ring_rx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_RX_ITR));
+
+ if (q_vector->num_ring_tx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_TX_ITR));
+
+ q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
+}
+
+/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1409,10 +1474,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
struct ice_vsi *vsi = q_vector->vsi;
- struct ice_pf *pf = vsi->back;
bool clean_complete = true;
- int budget_per_ring = 0;
struct ice_ring *ring;
+ int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
@@ -1426,11 +1490,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
- /* We attempt to distribute budget to each Rx queue fairly, but don't
- * allow the budget to go below 1 because that would exit polling early.
- */
- if (q_vector->num_ring_rx)
+ /* normally we have 1 Rx ring per q_vector */
+ if (unlikely(q_vector->num_ring_rx > 1))
+ /* We attempt to distribute budget to each Rx queue fairly, but
+ * don't allow the budget to go below 1 because that would exit
+ * polling early.
+ */
budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ else
+ /* Max of 1 Rx ring in this q_vector so give it the budget */
+ budget_per_ring = budget;
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
@@ -1450,8 +1519,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* poll us due to busy-polling
*/
if (likely(napi_complete_done(napi, work_done)))
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_update_ena_itr(vsi, q_vector);
+ ice_update_ena_itr(vsi, q_vector);
+ else
+ ice_set_wb_on_itr(vsi, q_vector);
return min_t(int, work_done, budget - 1);
}
@@ -1521,7 +1591,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -1923,7 +1993,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
*/
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int count = 0, size = skb_headlen(skb);
@@ -1954,7 +2024,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
*/
static bool __ice_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2036,6 +2106,7 @@ static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
+ struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
unsigned int count;
int tso, csum;
@@ -2083,7 +2154,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (csum < 0)
goto out_drop;
- if (tso || offload.cd_tunnel_params) {
+ /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
+ if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ ICE_TXD_CTX_QW1_CMD_S);
+
+ if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
int i = tx_ring->next_to_use;
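The ice_release_rx_desc() hunk above avoids redundant doorbell writes: hardware ignores the low three bits of QRX_TAIL, so the driver rounds the new tail down to a multiple of 8 and only writes the register when that rounded value actually moves. A minimal standalone sketch of the check, with simplified types rather than the driver's ring structure:

#include <stdbool.h>
#include <stdint.h>

/* Round the new next_to_use down to a multiple of 8 (the bits hardware
 * honours) and report whether the tail register needs a write at all.
 * For example, moving next_to_use from 13 to 18 rounds 18 down to 16,
 * which differs from 13, so the tail is written once for that batch.
 */
static bool rx_tail_should_bump(uint16_t prev_ntu, uint16_t new_ntu)
{
	uint16_t val = new_ntu & ~(uint16_t)0x7;

	return val != prev_ntu;
}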
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ec76aba347b9..94a9280193e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -144,6 +144,19 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
+#define ICE_WB_ON_ITR_USECS 2
+#define ICE_IN_WB_ON_ITR_MODE 255
+/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
+ * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
+ * set the write-back latency to the usecs passed in.
+ */
+#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
+ ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
+ GLINT_DYN_CTL_INTERVAL_M) | \
+ (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
+ GLINT_DYN_CTL_WB_ON_ITR_M)
+
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 24bbef8bbe69..b538d0b9eb80 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -13,9 +13,9 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
-static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
+static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
- return test_bit(tc, (unsigned long *)&bitmap);
+ return test_bit(tc, &bitmap);
}
/* Driver always calls main vsi_handle first */
@@ -347,6 +347,8 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
+ struct ice_sched_node *
+ sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 5d24b539648f..b93324e9f4bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -252,6 +252,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
}
/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ vf->num_qs_ena = 0;
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -267,19 +296,9 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
- continue;
-
- vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
- /* stop rings without wait time */
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
- ice_vsi_stop_rx_rings(vsi);
-
- clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
- }
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+ ice_dis_vf_qs(&pf->vf[i]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -297,13 +316,6 @@ void ice_free_vfs(struct ice_pf *pf)
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
/* disable VF qp mappings */
ice_dis_vf_mappings(&pf->vf[i]);
-
- /* Set this state so that assigned VF vectors can be
- * reclaimed by PF for reuse in ice_vsi_release(). No
- * need to clear this bit since pf->vf array is being
- * freed anyways after this for loop
- */
- set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
@@ -389,12 +401,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
wr32(hw, PF_PCI_CIAA,
VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
reg = rd32(hw, PF_PCI_CIAD);
- if ((reg & VF_TRANS_PENDING_M) != 0)
- dev_err(&pf->pdev->dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
- udelay(1);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -481,19 +496,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
- * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * This returns the first MSIX vector index in HW that is used by this VF and
- * this will always be the OICR index in the AVF driver so any functionality
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
* using vf->first_vector_idx for queue configuration will have to increment by
* 1 to avoid meddling with the OICR index.
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->hw.func_caps.common_cap.msix_vector_first_id +
- pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}
/**
@@ -543,7 +559,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev, "could not add mac filters\n");
+ dev_err(&pf->pdev->dev,
+ "could not add mac filters error %d\n", status);
+ else
+ vf->num_mac = 1;
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
@@ -551,7 +570,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
- clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
ice_alloc_vsi_res_exit:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
@@ -567,11 +585,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
int tx_rx_queue_left;
int status;
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
/* Update number of VF queues, in case VF had requested for queue
* changes
*/
@@ -581,6 +594,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -605,27 +623,30 @@ ice_alloc_vf_res_exit:
*/
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
+ int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int first, last, v;
struct ice_hw *hw;
- int abs_vf_id;
u32 reg;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
+ abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
+ abs_last = (abs_first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
- reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
- ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
@@ -983,6 +1004,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
}
/**
+ * ice_config_res_vfs - Finalize allocation of VFs resources in one go
+ * @pf: pointer to the PF structure
+ *
+ * This function is called as the last part of resetting all VFs, or when
+ * configuring VFs for the first time, where there are no resources to be freed.
+ * Returns true if resources were properly allocated for all VFs, and false
+ * otherwise.
+ */
+static bool ice_config_res_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v;
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ /* Finish resetting each VF and allocate resources */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vf *vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1012,17 +1074,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- struct ice_vsi *vsi;
-
- vf = &pf->vf[v];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
- }
- }
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states))
+ ice_dis_vf_qs(&pf->vf[v]);
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1031,7 +1085,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- usleep_range(10000, 20000);
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
@@ -1039,8 +1092,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
+ /* only delay if the check failed */
+ usleep_range(10, 20);
break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1054,7 +1110,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*/
if (v < pf->num_alloc_vfs)
dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
- usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@@ -1074,25 +1129,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_err(&pf->pdev->dev,
"Failed to free MSIX resources used by SR-IOV\n");
- if (ice_check_avail_res(pf)) {
- dev_err(&pf->pdev->dev,
- "Cannot allocate VF resources, try with fewer number of VFs\n");
+ if (!ice_config_res_vfs(pf))
return false;
- }
-
- /* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
-
- vf->num_vf_qs = pf->num_vf_qps;
- dev_dbg(&pf->pdev->dev,
- "VF-id %d has %d queues configured\n",
- vf->vf_id, vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
- }
-
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1114,27 +1152,31 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
+ /* If the PF has been disabled, there is no need to reset the VF until
+ * the PF is active again.
*/
- if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ if (test_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* If the VF has been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue. Otherwise, set
+ * disable VF state bit for actual reset, and continue.
+ */
+ if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
return false;
ice_trigger_vf_reset(vf, is_vflr);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
- } else {
+ if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+ ice_dis_vf_qs(vf);
+ else
/* Call Disable LAN Tx queue AQ call even when queues are not
- * enabled. This is needed for successful completiom of VFR
+ * enabled. This is needed for successful completion of VFR
*/
ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
NULL, ICE_VF_RESET, vf->vf_id, NULL);
- }
hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
@@ -1145,12 +1187,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* poll the status register to make sure that the reset
* completed successfully.
*/
- usleep_range(10000, 20000);
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (reg & VPGEN_VFRSTAT_VFRD_M) {
rsd = true;
break;
}
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
}
/* Display a warning if VF didn't manage to reset in time, but need to
@@ -1160,8 +1204,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
- usleep_range(10000, 20000);
-
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
@@ -1183,7 +1225,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_cleanup_and_realloc_vf(vf);
ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1257,7 +1298,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
-
+ set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
@@ -1283,19 +1324,16 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
-
- /* Set this state so that PF driver does VF vector assignment */
- set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
}
pf->num_alloc_vfs = num_alloc_vfs;
- /* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, true)) {
+ /* VF resources get allocated with initialization */
+ if (!ice_config_res_vfs(pf)) {
ret = -EIO;
goto err_unroll_sriov;
}
- goto err_unroll_intr;
+ return ret;
err_unroll_sriov:
pf->vf = NULL;
@@ -1307,6 +1345,7 @@ err_pci_disable_sriov:
err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(__ICE_OICR_INTR_DIS, pf->state);
return ret;
}
@@ -1490,10 +1529,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
dev_info(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ "Unable to send the message to VF %d ret %d aq_err %d\n",
+ vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1688,6 +1727,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
}
/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * Check for a valid ring count; it should be a multiple of
+ * ICE_REQ_DESC_MULTIPLE or zero.
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
+/**
* ice_vc_config_rss_key
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1712,18 +1766,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1759,18 +1813,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1839,6 +1893,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1855,6 +1911,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1865,12 +1927,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- if (ice_vsi_start_rx_rings(vsi))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ set_bit(vf_q_id, vf->rxq_ena);
+ vf->num_qs_ena++;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ set_bit(vf_q_id, vf->txq_ena);
+ vf->num_qs_ena++;
+ }
/* Set flag to indicate that queues are enabled */
if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
@@ -1893,9 +1991,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1910,29 +2010,81 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop tx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_txq_meta txq_meta = { 0 };
+
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+ ring, &txq_meta)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop Tx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->txq_ena);
+ vf->num_qs_ena--;
+ }
}
- if (ice_vsi_stop_rx_rings(vsi)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (vqs->rx_queues) {
+ q_map = vqs->rx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ vf->num_qs_ena--;
+ }
}
/* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
@@ -1962,12 +2114,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
irqmap_info = (struct virtchnl_irq_map_info *)msg;
num_q_vectors_mapped = irqmap_info->num_vectors;
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
/* Check to make sure number of VF vectors mapped is not greater than
* number of VF vectors originally allocated, and check that
* there is actually at least a single VF queue vector mapped
@@ -1979,6 +2125,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < num_q_vectors_mapped; i++) {
struct ice_q_vector *q_vector;
@@ -2056,6 +2208,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
+ u16 num_rxq = 0, num_txq = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -2071,13 +2224,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi)
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
+ }
- if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(&pf->pdev->dev,
"VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, qci->num_queue_pairs);
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2087,37 +2243,52 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
- /* copy Rx queue info from VF into VSI */
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
- if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ if (qpi->txq.ring_len > 0) {
+ num_txq++;
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
}
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
- qpi->rxq.max_pkt_size < 64) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ num_rxq++;
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
}
+
vsi->max_frame = qpi->rxq.max_pkt_size;
}
/* VF can request to configure less than allocated queues
* or default allocated queues. So update the VSI with new number
*/
- vsi->num_txq = qci->num_queue_pairs;
- vsi->num_rxq = qci->num_queue_pairs;
+ vsi->num_txq = num_txq;
+ vsi->num_rxq = num_rxq;
/* All queues of VF VSI are in TC 0 */
- vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
- vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
+ vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
@@ -2171,7 +2342,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- LIST_HEAD(mac_list);
+ enum ice_status status;
struct ice_vsi *vsi;
int mac_count = 0;
int i;
@@ -2245,33 +2416,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change MAC */
- if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ /* program the updated filter list */
+ status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
+ if (status == ICE_ERR_DOES_NOT_EXIST ||
+ status == ICE_ERR_ALREADY_EXISTS) {
+ dev_info(&pf->pdev->dev,
+ "can't %s MAC filters %pM for VF %d, error %d\n",
+ set ? "add" : "remove", maddr, vf->vf_id,
+ status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "can't %s MAC filters for VF %d, error %d\n",
+ set ? "add" : "remove", vf->vf_id, status);
+ v_ret = ice_err_to_virt_err(status);
goto handle_mac_exit;
}
+
mac_count++;
}
- /* program the updated filter list */
+ /* Track number of MAC filters programmed for the VF VSI */
if (set)
- v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
+ vf->num_mac += mac_count;
else
- v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
-
- if (v_ret) {
- dev_err(&pf->pdev->dev,
- "can't update MAC filters for VF %d, error %d\n",
- vf->vf_id, v_ret);
- } else {
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
- }
+ vf->num_mac -= mac_count;
handle_mac_exit:
- ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
@@ -2315,11 +2485,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
- int req_queues = vfres->num_queue_pairs;
+ u16 req_queues = vfres->num_queue_pairs;
struct ice_pf *pf = vf->pf;
- int max_allowed_vf_queues;
- int tx_rx_queue_left;
- int cur_queues;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ u16 cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2327,29 +2497,30 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
}
cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (req_queues <= 0) {
+ if (!req_queues) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
- vf->vf_id, req_queues);
+ "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
- } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
- "VF %d requested %d more queues, but only %d left.\n",
+ "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
ice_vc_dis_vf(vf);
dev_info(&pf->pdev->dev,
- "VF %d granted request of %d queues.\n",
+ "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}
@@ -2731,20 +2902,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EPERM;
else
err = -EINVAL;
- goto error_handler;
- }
-
- /* Perform additional checks specific to RSS and Virtchnl */
- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
- err = -EINVAL;
- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
- err = -EINVAL;
}
error_handler:
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index c3ca522c245a..0d9880c8bba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -15,26 +15,38 @@
#define ICE_MAX_MACADDR_PER_VF 12
/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+#define ICE_MDD_EVENTS_THRESHOLD 30
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_M 0x20
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resources default values and limitations */
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+
/* Specific VF states */
enum ice_vf_states {
- ICE_VF_STATE_INIT = 0,
- ICE_VF_STATE_ACTIVE,
- ICE_VF_STATE_ENA,
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
ICE_VF_STATE_DIS,
ICE_VF_STATE_MC_PROMISC,
ICE_VF_STATE_UC_PROMISC,
- /* state to indicate if PF needs to do vector assignment for VF.
- * This needs to be set during first time VF initialization or later
- * when VF asks for more Vectors through virtchnl OP.
- */
- ICE_VF_STATE_CFG_INTR,
ICE_VF_STATES_NBITS
};
@@ -50,11 +62,14 @@ struct ice_vf {
s16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
- int first_vector_idx; /* first vector index of this VF */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
u16 port_vlan_id;
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
@@ -77,6 +92,7 @@ struct ice_vf {
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
+ u16 num_qs_ena; /* total number of Tx/Rx queues enabled */
};
#ifdef CONFIG_PCI_IOV
@@ -103,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
#define ice_vc_process_vf_msg(pf, event) do {} while (0)
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
+#define ice_set_vf_state_qs_dis(vf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b4df3e319467..105b0624081a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4731,8 +4731,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -5918,7 +5917,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -6074,7 +6073,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
@@ -8879,8 +8879,7 @@ static int __maybe_unused igb_resume(struct device *dev)
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
if (!igb_has_link(adapter))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 34cd30d7162f..0f2b68f4bb0f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
count++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 59258d791106..db289bcce21d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
- wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
@@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
switch (hw->device_id) {
case IGC_DEV_ID_I225_LM:
case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I220_V:
+ case IGC_DEV_ID_I225_K:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index fc0ccfe38a20..11b99acf4abe 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -54,7 +54,7 @@
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
-#define IGC_CTRL_RST 0x04000000 /* Global reset */
+#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 1039a224ac80..abb2d72911ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -18,6 +18,9 @@
#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
#define IGC_FUNC_0 0
@@ -151,16 +154,10 @@ struct igc_phy_info {
u16 autoneg_advertised;
u16 autoneg_mask;
- u16 cable_length;
- u16 max_cable_length;
- u16 min_cable_length;
- u16 pair_length[4];
u8 mdix;
- bool disable_polarity_correction;
bool is_mdix;
- bool polarity_correction;
bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
@@ -190,12 +187,7 @@ struct igc_fc_info {
};
struct igc_dev_spec_base {
- bool global_device_reset;
- bool eee_disable;
bool clear_semaphore_once;
- bool module_plugged;
- u8 media_port;
- bool mas_capable;
};
struct igc_hw {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index aa9323e55406..251552855c40 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -36,6 +36,9 @@ static const struct igc_info *igc_info_tbl[] = {
static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
/* required last entry */
{0, }
};
@@ -349,8 +352,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -861,7 +863,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
u32 tx_flags = first->tx_flags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
@@ -1015,7 +1017,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igc_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e5ac2d3fd816..0940a0da16f2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50dfb02fa34c..171cdc552961 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -190,22 +190,12 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
{
const char *name = pci_name(adapter->pdev);
- struct dentry *pfile;
+
adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
- if (adapter->ixgbe_dbg_adapter) {
- pfile = debugfs_create_file("reg_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_reg_ops_fops);
- if (!pfile)
- e_dev_err("debugfs reg_ops for %s failed\n", name);
- pfile = debugfs_create_file("netdev_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_netdev_ops_fops);
- if (!pfile)
- e_dev_err("debugfs netdev_ops for %s failed\n", name);
- } else {
- e_dev_err("debugfs entry for %s failed\n", name);
- }
+ debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_reg_ops_fops);
+ debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_netdev_ops_fops);
}
/**
@@ -224,8 +214,6 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
void ixgbe_dbg_init(void)
{
ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
- if (ixgbe_dbg_root == NULL)
- pr_err("init of debugfs failed\n");
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7882148abb43..17b7ae9f46ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1785,7 +1785,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
@@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
+ skb_frag_off_add(frag, pull_len);
skb->data_len -= pull_len;
skb->tail += pull_len;
}
@@ -1840,11 +1840,11 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
skb_headlen(skb),
DMA_FROM_DEVICE);
} else {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
}
@@ -8183,7 +8183,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -8602,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d2b41f9f87f8..8c011d4ce7a9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3949,7 +3949,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -4134,8 +4134,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ count += TXD_USE_COUNT(skb_frag_size(frag));
+ }
#else
count += skb_shinfo(skb)->nr_frags;
#endif
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 0b668357db4d..6d52cf5ce20e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2030,23 +2030,22 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- const struct skb_frag_struct *frag;
u32 len;
int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
- skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag), hidma);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), hidma);
if (ret) {
jme_drop_tx_map(jme, idx, i);
goto out;
}
-
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -3193,8 +3192,7 @@ jme_shutdown(struct pci_dev *pdev)
static int
jme_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
@@ -3236,8 +3234,7 @@ jme_suspend(struct device *dev)
static int
jme_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index cda641ef89af..900affbdcc0e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -458,17 +458,11 @@ static int xrx200_probe(struct platform_device *pdev)
}
priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
- if (priv->chan_rx.dma.irq < 0) {
- dev_err(dev, "failed to get RX IRQ, %i\n",
- priv->chan_rx.dma.irq);
+ if (priv->chan_rx.dma.irq < 0)
return -ENOENT;
- }
priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
- if (priv->chan_tx.dma.irq < 0) {
- dev_err(dev, "failed to get TX IRQ, %i\n",
- priv->chan_tx.dma.irq);
+ if (priv->chan_tx.dma.irq < 0)
return -ENOENT;
- }
/* get the clock */
priv->clk = devm_clk_get(dev, NULL);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 88ea5ac83c93..82ea55ae5053 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+ if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
return 1;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 895bfed26a8a..e49820675c8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_desc->data_size = frag->size;
+ tx_desc->data_size = skb_frag_size(frag);
tx_desc->buf_phys_addr =
dma_map_single(pp->dev->dev.parent, addr,
@@ -4469,7 +4469,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *bm_node;
struct mvneta_port *pp;
@@ -4553,8 +4552,7 @@ static int mvneta_probe(struct platform_device *pdev)
if (!IS_ERR(pp->clk_bus))
clk_prepare_enable(pp->clk_bus);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->base = devm_ioremap_resource(&pdev->dev, res);
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pp->base)) {
err = PTR_ERR(pp->base);
goto err_clk;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 82ee2bcca6fd..46c942ef2287 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -411,15 +411,13 @@ static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct mvneta_bm *priv;
- struct resource *res;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 4d9564ba68f6..ee3bab508ee8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -829,9 +829,8 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
struct hrtimer tx_done_timer;
+ struct net_device *dev;
bool timer_scheduled;
- /* Tasklet for egress finalization */
- struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 274fb07362cb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -452,8 +452,6 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
@@ -480,8 +478,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
sprintf(flow_entry_name, "%02d", flow);
flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
- if (!flow_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->flow_entries[flow];
@@ -514,8 +510,6 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
flow_dir = debugfs_create_dir("flows", parent);
- if (!flow_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
@@ -539,8 +533,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
sprintf(prs_entry_name, "%03d", tid);
prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
- if (!prs_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->prs_entries[tid];
@@ -578,8 +570,6 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
prs_dir = debugfs_create_dir("parser", parent);
- if (!prs_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -688,8 +678,6 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
debugfs_create_file("parser_entries", 0444, port_dir, port,
&mvpp2_dbgfs_port_parser_fops);
@@ -716,15 +704,10 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
int ret, i;
mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
- if (!mvpp2_root) {
+ if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
- if (IS_ERR(mvpp2_root))
- return;
- }
mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
- if (IS_ERR(mvpp2_dir))
- return;
priv->dbgfs_dir = mvpp2_dir;
priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ccdd47f3b8fb..12e799e99803 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2651,31 +2651,21 @@ handled:
return IRQ_HANDLED;
}
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
-{
- ktime_t interval;
-
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
- HRTIMER_MODE_REL_PINNED);
- }
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
- struct net_device *dev = (struct net_device *)data;
- struct mvpp2_port *port = netdev_priv(dev);
+ struct net_device *dev;
+ struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = per_cpu_ptr(port->pcpu,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+ dev = port_pcpu->dev;
if (!netif_running(dev))
- return;
+ return HRTIMER_NORESTART;
+
port_pcpu->timer_scheduled = false;
+ port = netdev_priv(dev);
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -2683,18 +2673,13 @@ static void mvpp2_tx_proc_cb(unsigned long data)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo)
- mvpp2_timer_set(port_pcpu);
-}
-
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
- struct mvpp2_port_pcpu,
- tx_done_timer);
-
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ if (tx_todo && !port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ return HRTIMER_RESTART;
+ }
return HRTIMER_NORESTART;
}
@@ -2923,14 +2908,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3181,7 +3167,12 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- mvpp2_timer_set(port_pcpu);
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ }
}
if (test_bit(thread, &port->priv->lock_map))
@@ -3618,7 +3609,6 @@ static int mvpp2_stop(struct net_device *dev)
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
- tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
mvpp2_cleanup_rxqs(port);
@@ -5010,7 +5000,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node = to_of_node(port_fwnode);
netdev_features_t features;
struct net_device *dev;
- struct resource *res;
struct phylink *phylink;
char *mac_from = "";
unsigned int ntxqs, nrxqs, thread;
@@ -5114,8 +5103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->comphy = comphy;
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -5184,13 +5172,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
port_pcpu->timer_scheduled = false;
-
- tasklet_init(&port_pcpu->tx_done_tasklet,
- mvpp2_tx_proc_cb,
- (unsigned long)dev);
+ port_pcpu->dev = dev;
}
}
@@ -5544,14 +5529,12 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3aa998797bc1..51b77c2de400 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1425,8 +1425,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->dev = dev;
pep->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pep->base = devm_ioremap_resource(&pdev->dev, res);
+ pep->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pep->base)) {
err = -ENOMEM;
goto err_netdev;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9ac854c2b371..0a2ec387a482 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3731,7 +3731,6 @@ static int skge_device_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct skge_port *skge;
- struct dentry *d;
if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
goto done;
@@ -3739,33 +3738,20 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs) {
- d = debugfs_rename(skge_debug, skge->debugfs,
- skge_debug, dev->name);
- if (d)
- skge->debugfs = d;
- else {
- netdev_info(dev, "rename failed\n");
- debugfs_remove(skge->debugfs);
- }
- }
+ if (skge->debugfs)
+ skge->debugfs = debugfs_rename(skge_debug,
+ skge->debugfs,
+ skge_debug, dev->name);
break;
case NETDEV_GOING_DOWN:
- if (skge->debugfs) {
- debugfs_remove(skge->debugfs);
- skge->debugfs = NULL;
- }
+ debugfs_remove(skge->debugfs);
+ skge->debugfs = NULL;
break;
case NETDEV_UP:
- d = debugfs_create_file(dev->name, 0444,
- skge_debug, dev,
- &skge_debug_fops);
- if (!d || IS_ERR(d))
- netdev_info(dev, "debugfs create failed\n");
- else
- skge->debugfs = d;
+ skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
+ dev, &skge_debug_fops);
break;
}
@@ -3780,15 +3766,8 @@ static struct notifier_block skge_notifier = {
static __init void skge_debug_init(void)
{
- struct dentry *ent;
+ skge_debug = debugfs_create_dir("skge", NULL);
- ent = debugfs_create_dir("skge", NULL);
- if (!ent || IS_ERR(ent)) {
- pr_info("debugfs create directory failed\n");
- return;
- }
-
- skge_debug = ent;
register_netdevice_notifier(&skge_notifier);
}
@@ -4078,8 +4057,7 @@ static void skge_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
@@ -4103,8 +4081,7 @@ static int skge_suspend(struct device *dev)
static int skge_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i, err;
if (!hw)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e0363870f3a5..5f56ee83e3b1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5174,8 +5174,7 @@ static void sky2_remove(struct pci_dev *pdev)
static int sky2_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sky2_hw *hw = pci_get_drvdata(pdev);
+ struct sky2_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 1f7fff81f24d..4968352ba188 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek ethernet driver"
- depends on ARCH_MEDIATEK || SOC_MT7621
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
---help---
If you have a Mediatek SoC with ethernet, say Y.
@@ -9,7 +9,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
- select PHYLIB
+ select PHYLINK
---help---
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7f05880cf9ef..ef11cf3d1ccc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -239,10 +239,9 @@ out:
return err;
}
-static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- unsigned int val = 0;
- int sid, err, path;
+ int err, path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
@@ -252,33 +251,10 @@ static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
if (err)
return err;
- /* The path GMAC to SGMII will be enabled once the SGMIISYS is being
- * setup done.
- */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
-
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
-
- /* Setup SGMIISYS with the determined property */
- if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
- else
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
-
- if (err)
- return err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
-
return 0;
}
-static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path = 0;
@@ -296,7 +272,7 @@ static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
return 0;
}
-static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path;
@@ -311,42 +287,3 @@ static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
return 0;
}
-int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
-{
- int err;
-
- switch (phymode) {
- case PHY_INTERFACE_MODE_TRGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_RMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
- err = mtk_gmac_rgmii_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- err = mtk_gmac_sgmii_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- case PHY_INTERFACE_MODE_GMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
- err = mtk_gmac_gephy_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c39d7f4ab1d4..c61069340f4f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -18,6 +18,7 @@
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
#include "mtk_eth_soc.h"
@@ -186,165 +187,339 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
-static void mtk_phy_link_adjust(struct net_device *dev)
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct mtk_mac *mac = netdev_priv(dev);
- u16 lcl_adv = 0, rmt_adv = 0;
- u8 flowctrl;
- u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
- MAC_MCR_BACKPR_EN;
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new, sid;
+ int val, ge_mode, err;
+
+ /* MT76x8 has no hardware settings for the MAC */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ mac->interface != state->interface) {
+ /* Setup soc pin functions */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ if (mac->id)
+ goto err_phy;
+ if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_GMAC1_TRGMII))
+ goto err_phy;
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ default:
+ goto err_phy;
+ }
- if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ /* Setup clock for 1st gmac */
+ if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_TRGMII_MT7621_CLK)) {
+ if (mt7621_gmac0_rgmii_adjust(mac->hw,
+ state->interface))
+ goto err_phy;
+ } else {
+ if (state->interface !=
+ PHY_INTERFACE_MODE_TRGMII)
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->speed);
+ }
+ }
+
+ ge_mode = 0;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ ge_mode = 1;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ ge_mode = 2;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
+ default:
+ break;
+ }
+
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ mac->interface = state->interface;
+ }
+
+ /* SGMII */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)) {
+ /* The path GMAC to SGMII will be enabled once the SGMIISYS
+ * setup is done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK,
+ ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Decide how the GMAC and SGMIISYS are mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ /* Setup SGMIISYS with the determined property */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII)
+ err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+ state);
+ else if (phylink_autoneg_inband(mode))
+ err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+ if (err)
+ goto init_err;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+ } else if (phylink_autoneg_inband(mode)) {
+ dev_err(eth->dev,
+ "In-band mode not supported in non SGMII mode!\n");
return;
+ }
- switch (dev->phydev->speed) {
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+ MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+ MAC_MCR_FORCE_RX_FC);
+ mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ switch (state->speed) {
+ case SPEED_2500:
case SPEED_1000:
- mcr |= MAC_MCR_SPEED_1000;
+ mcr_new |= MAC_MCR_SPEED_1000;
break;
case SPEED_100:
- mcr |= MAC_MCR_SPEED_100;
+ mcr_new |= MAC_MCR_SPEED_100;
break;
}
-
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
- if (mt7621_gmac0_rgmii_adjust(mac->hw,
- dev->phydev->interface))
- return;
- } else {
- if (!mac->trgmii)
- mtk_gmac0_rgmii_adjust(mac->hw,
- dev->phydev->speed);
- }
+ if (state->duplex == DUPLEX_FULL) {
+ mcr_new |= MAC_MCR_FORCE_DPX;
+ if (state->pause & MLO_PAUSE_TX)
+ mcr_new |= MAC_MCR_FORCE_TX_FC;
+ if (state->pause & MLO_PAUSE_RX)
+ mcr_new |= MAC_MCR_FORCE_RX_FC;
}
- if (dev->phydev->link)
- mcr |= MAC_MCR_FORCE_LINK;
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
- if (dev->phydev->duplex) {
- mcr |= MAC_MCR_FORCE_DPX;
+ return;
- if (dev->phydev->pause)
- rmt_adv = LPA_PAUSE_CAP;
- if (dev->phydev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
+err_phy:
+ dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+ mac->id, phy_modes(state->interface));
+ return;
- lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+init_err:
+ dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+ mac->id, phy_modes(state->interface), err);
+}
- if (flowctrl & FLOW_CTRL_TX)
- mcr |= MAC_MCR_FORCE_TX_FC;
- if (flowctrl & FLOW_CTRL_RX)
- mcr |= MAC_MCR_FORCE_RX_FC;
+static int mtk_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
- netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
- flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
- flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ state->link = (pmsr & MAC_MSR_LINK);
+ state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
+
+ switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case MAC_MSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MAC_MSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
}
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & MAC_MSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & MAC_MSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
- if (!of_phy_is_fixed_link(mac->of_node))
- phy_print_status(dev->phydev);
+ return 1;
}
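/* Editor's aside (illustrative sketch, not part of the patch): the MAC_MSR
 * decode above follows the bit layout added to mtk_eth_soc.h later in this
 * diff (LINK bit 0, DPX bit 1, SPEED_100 bit 2, SPEED_1000 bit 3, TX_FC
 * bit 4, RX_FC bit 5). A stand-alone speed decode would look like this;
 * mtk_msr_speed() is a hypothetical name used only here.
 */
static int mtk_msr_speed(u32 pmsr)
{
	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case MAC_MSR_SPEED_1000:
		return SPEED_1000;
	case MAC_MSR_SPEED_100:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return SPEED_UNKNOWN;
	}
}
/* Worked example: pmsr == 0x2b sets bits 0, 1, 3 and 5, which reads back as
 * link up, full duplex, 1000 Mb/s, RX pause enabled.
 */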
-static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node)
+static void mtk_mac_an_restart(struct phylink_config *config)
{
- struct phy_device *phydev;
- int phy_mode;
-
- phy_mode = of_get_phy_mode(phy_node);
- if (phy_mode < 0) {
- dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
- return -EINVAL;
- }
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
- phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
- mtk_phy_link_adjust, 0, phy_mode);
- if (!phydev) {
- dev_err(eth->dev, "could not connect to PHY\n");
- return -ENODEV;
- }
+ mtk_sgmii_restart_an(mac->hw, mac->id);
+}
- dev_info(eth->dev,
- "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
- mac->id, phydev_name(phydev), phydev->phy_id,
- phydev->drv->name);
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- return 0;
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static int mtk_phy_connect(struct net_device *dev)
+static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth;
- struct device_node *np;
- u32 val;
- int err;
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- eth = mac->hw;
- np = of_parse_phandle(mac->of_node, "phy-handle", 0);
- if (!np && of_phy_is_fixed_link(mac->of_node))
- if (!of_phy_register_fixed_link(mac->of_node))
- np = of_node_get(mac->of_node);
- if (!np)
- return -ENODEV;
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+}
- err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
- if (err)
- goto err_phy;
+static void mtk_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+ phy_interface_mode_is_rgmii(state->interface)) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+ !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+ (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)))) {
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
- mac->ge_mode = 0;
- switch (of_get_phy_mode(np)) {
+ switch (state->interface) {
case PHY_INTERFACE_MODE_TRGMII:
- mac->trgmii = true;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ break;
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseT_Half);
+ /* fall through */
case PHY_INTERFACE_MODE_SGMII:
- break;
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ /* fall through */
case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_GMII:
- mac->ge_mode = 1;
- break;
- case PHY_INTERFACE_MODE_REVMII:
- mac->ge_mode = 2;
- break;
case PHY_INTERFACE_MODE_RMII:
- if (!mac->id)
- goto err_phy;
- mac->ge_mode = 3;
- break;
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_NA:
default:
- goto err_phy;
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ break;
}
- /* put the gmac into the right mode */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
-
- /* couple phydev to net_device */
- if (mtk_phy_connect_node(eth, mac, np))
- goto err_phy;
+ if (state->interface == PHY_INTERFACE_MODE_NA) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+ }
- of_node_put(np);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
- return 0;
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
-err_phy:
- if (of_phy_is_fixed_link(mac->of_node))
- of_phy_deregister_fixed_link(mac->of_node);
- of_node_put(np);
- dev_err(eth->dev, "%s: invalid phy\n", __func__);
- return -EINVAL;
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
+static const struct phylink_mac_ops mtk_phylink_ops = {
+ .validate = mtk_validate,
+ .mac_link_state = mtk_mac_link_state,
+ .mac_an_restart = mtk_mac_an_restart,
+ .mac_config = mtk_mac_config,
+ .mac_link_down = mtk_mac_link_down,
+ .mac_link_up = mtk_mac_link_up,
+};
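/* Editor's aside (general phylink background, hedged, not stated in the patch
 * itself): once this ops table is passed to phylink_create(), phylink decides
 * when each callback runs: .validate to prune the supported/advertised link
 * modes, .mac_config on interface or mode changes, .mac_link_up and
 * .mac_link_down on link transitions, .mac_link_state to poll the in-band
 * link status, and .mac_an_restart for an in-band autonegotiation restart
 * (e.g. from an ethtool nway reset).
 */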
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
@@ -395,8 +570,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -406,8 +581,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -437,6 +612,7 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
const char *macaddr = dev->dev_addr;
if (ret)
@@ -446,11 +622,19 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
return -EBUSY;
spin_lock_bh(&mac->hw->page_lock);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
- MTK_GDMA_MAC_ADRH(mac->id));
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA_MAC_ADRL(mac->id));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
spin_unlock_bh(&mac->hw->page_lock);
return 0;
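/* Editor's aside (worked example, not part of the patch): both register banks
 * use the same packing, the high register takes the first two address bytes
 * and the low register the remaining four. For a hypothetical address
 * 00:11:22:33:44:55:
 *
 *   ADRH = (0x00 << 8) | 0x11                               = 0x0011
 *   ADRL = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 */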
@@ -626,19 +810,47 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *dma)
+{
+ return ring->dma_pdma - ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+ return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+}
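/* Editor's aside (illustrative only): qdma_to_pdma() assumes ring->dma and
 * ring->dma_pdma are parallel arrays of equal length, so the pointer
 * arithmetic simply carries the element index from one array to the other.
 * It is equivalent to the (hypothetically named) helper below:
 */
static struct mtk_tx_dma *qdma_to_pdma_by_index(struct mtk_tx_ring *ring,
						struct mtk_tx_dma *dma)
{
	return &ring->dma_pdma[dma - ring->dma];
}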
+
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
}
+
tx_buf->flags = 0;
if (tx_buf->skb &&
(tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
@@ -646,19 +858,45 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
tx_buf->skb = NULL;
}
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
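/* Editor's aside (illustrative only): on the PDMA-only SoCs each TX
 * descriptor carries two buffers, and the low bit of the idx argument selects
 * the slot: even idx fills txd1/TX_DMA_PLEN0 and dma_addr0, odd idx fills
 * txd3/TX_DMA_PLEN1 and dma_addr1 of the same descriptor. The skb head and
 * the following buffer can therefore share one descriptor, e.g. (head_addr,
 * head_len, frag_addr, frag_len are placeholders):
 *
 *   setup_tx_buf(eth, tx_buf, txd_pdma, head_addr, head_len, 0);  // slot 0
 *   setup_tx_buf(eth, tx_buf, txd_pdma, frag_addr, frag_len, 1);  // slot 1
 */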
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
u32 txd4 = 0, fport;
+ int k = 0;
itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
return -ENOMEM;
@@ -689,26 +927,37 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
+ setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ k++);
/* TX SG offload */
txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
nr_frags = skb_shinfo(skb)->nr_frags;
+
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
bool last_frag = false;
unsigned int frag_map_size;
+ bool new_desc = true;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
- txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
- if (txd == ring->last_free)
- goto err_dma;
- n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
@@ -727,14 +976,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd4, fport);
tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+ setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+ frag_map_size, k++);
+
frag_size -= frag_map_size;
offset += frag_map_size;
}
@@ -746,6 +997,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd4, txd4);
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -758,9 +1015,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ } else {
+ int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
return 0;
@@ -772,7 +1035,11 @@ err_dma:
mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
} while (itxd != txd);
return -ENOMEM;
@@ -781,13 +1048,14 @@ err_dma:
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
int i, nfrags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+ MTK_TX_DMA_BUF_LEN);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -902,7 +1170,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring = &eth->rx_ring[i];
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -945,13 +1213,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
- int mac = 0;
+ int mac;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -960,9 +1228,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
 		/* find out which mac the packet comes from. values start at 1 */
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mac = 0;
+ } else {
+ mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK;
+ mac--;
+ }
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -980,7 +1252,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD,
+ new_data + NET_SKB_PAD +
+ eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
@@ -1003,7 +1276,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & RX_DMA_L4_VALID)
+ if (trxd.rxd4 & eth->rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1020,7 +1293,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+ else
+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1039,19 +1315,14 @@ rx_done:
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
@@ -1089,6 +1360,62 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ return budget;
+}
+
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma *desc;
+ struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+ dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+ skb = tx_buf->skb;
+ if (!skb)
+ break;
+
+ if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ bytes[0] += skb->len;
+ done[0]++;
+ budget--;
+ }
+
+ mtk_tx_unmap(eth, tx_buf);
+
+ desc = &ring->dma[cpu];
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+ }
+
+ ring->cpu_idx = cpu;
+
+ return budget;
+}
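/* Editor's aside (illustrative only): the PDMA completion walk above moves
 * the software index ring->cpu_idx towards the hardware index read from
 * MT7628_TX_DTX_IDX0. For example, with cpu_idx == 5 and DTX_IDX0 == 8
 * (and enough budget), buffers 5, 6 and 7 are unmapped and returned to the
 * free list, after which cpu_idx is left at 8.
 */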
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+
+ memset(done, 0, sizeof(done));
+ memset(bytes, 0, sizeof(bytes));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ else
+ budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i] || !done[i])
continue;
@@ -1120,13 +1447,14 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
u32 status, mask;
int tx_done = 0;
- mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
+ mask = mtk_r32(eth, eth->tx_int_mask_reg);
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n",
tx_done, status, mask);
@@ -1135,7 +1463,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
if (tx_done == budget)
return budget;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
if (status & MTK_TX_DONE_INT)
return budget;
@@ -1202,6 +1530,24 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
}
+ /* On MT7688 (PDMA only) this driver uses the ring->dma structs
+ * only as the framework. The real HW descriptors are the PDMA
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma,
+ GFP_ATOMIC);
+ if (!ring->dma_pdma)
+ goto no_tx_mem;
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+ ring->dma_pdma[i].txd4 = 0;
+ }
+ }
+
+ ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
@@ -1212,15 +1558,23 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_DRX_PTR);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_CRX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+ MTK_QTX_CFG(0));
+ } else {
+ mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+ mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ }
return 0;
@@ -1247,6 +1601,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
ring->phys);
ring->dma = NULL;
}
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+ ring->dma_pdma,
+ ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
@@ -1294,14 +1656,17 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD,
+ ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ ring->dma[i].rxd2 = RX_DMA_LSO;
+ else
+ ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
@@ -1617,9 +1982,16 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
unsigned long t_start = jiffies;
while (1) {
- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ } else {
+ if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ }
+
if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
break;
}
@@ -1636,20 +2008,24 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (mtk_dma_busy_wait(eth))
return -EBUSY;
- /* QDMA needs scratch memory for internal reordering of the
- * descriptors
- */
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* QDMA needs scratch memory for internal reordering of the
+ * descriptors
+ */
+ err = mtk_init_fq_dma(eth);
+ if (err)
+ return err;
+ }
err = mtk_tx_alloc(eth);
if (err)
return err;
- err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+ if (err)
+ return err;
+ }
err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
@@ -1666,10 +2042,14 @@ static int mtk_dma_init(struct mtk_eth *eth)
return err;
}
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
- MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* Enable random early drop and set drop threshold
+ * automatically
+ */
+ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+ FC_THRES_MIN, MTK_QDMA_FC_THRES);
+ mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ }
return 0;
}
@@ -1745,8 +2125,8 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -1778,17 +2158,23 @@ static int mtk_start_dma(struct mtk_eth *eth)
return err;
}
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+ MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS,
+ MTK_QDMA_GLO_CFG);
- mtk_w32(eth,
- MTK_RX_DMA_EN | rx_2b_offset |
- MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | rx_2b_offset |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ MTK_PDMA_GLO_CFG);
+ } else {
+ mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+ MTK_PDMA_GLO_CFG);
+ }
return 0;
}
@@ -1797,6 +2183,14 @@ static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int err;
+
+ err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+ if (err) {
+ netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+ err);
+ return err;
+ }
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
@@ -1814,9 +2208,8 @@ static int mtk_open(struct net_device *dev)
else
refcount_inc(&eth->dma_refcnt);
- phy_start(dev->phydev);
+ phylink_start(mac->phylink);
netif_start_queue(dev);
-
return 0;
}
@@ -1848,8 +2241,11 @@ static int mtk_stop(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ phylink_stop(mac->phylink);
+
netif_tx_disable(dev);
- phy_stop(dev->phydev);
+
+ phylink_disconnect_phy(mac->phylink);
/* only shutdown DMA if this is the last user */
if (!refcount_dec_and_test(&eth->dma_refcnt))
@@ -1860,7 +2256,8 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
@@ -1922,17 +2319,26 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ ret = device_reset(eth->dev);
+ if (ret) {
+ dev_err(eth->dev, "MAC reset failed!\n");
+ goto err_disable_pm;
+ }
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->mac[i])
- continue;
- val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
- val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+ return 0;
}
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ /* Non-MT7628 handling... */
+ ethsys_reset(eth, RSTCTRL_FE);
+ ethsys_reset(eth, RSTCTRL_PPE);
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -1946,11 +2352,11 @@ static int mtk_hw_init(struct mtk_eth *eth)
}
/* Set linkdown as the default for each GMAC. Its own MCR would be set
- * up with the more appropriate value when mtk_phy_link_adjust call is
- * being invoked.
+	 * up with the more appropriate value when the mtk_mac_config call is
+ * invoked.
*/
for (i = 0; i < MTK_MAC_COUNT; i++)
- mtk_w32(eth, 0, MTK_MAC_MCR(i));
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
/* Indicates CDM to parse the MTK special tag from CPU
* which also is working out for untag packets.
@@ -1978,7 +2384,7 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
/* setup the forward port to send frame to PDMA */
@@ -2030,7 +2436,7 @@ static int __init mtk_init(struct net_device *dev)
dev->dev_addr);
}
- return mtk_phy_connect(dev);
+ return 0;
}
static void mtk_uninit(struct net_device *dev)
@@ -2038,20 +2444,20 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(dev->phydev);
- if (of_phy_is_fixed_link(mac->of_node))
- of_phy_deregister_fixed_link(mac->of_node);
+ phylink_disconnect_phy(mac->phylink);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mtk_mac *mac = netdev_priv(dev);
+
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phylink_mii_ioctl(mac->phylink, ifr, cmd);
default:
break;
}
@@ -2092,16 +2498,6 @@ static void mtk_pending_work(struct work_struct *work)
eth->dev->pins->default_state);
mtk_hw_init(eth);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->mac[i] ||
- of_phy_is_fixed_link(eth->mac[i]->of_node))
- continue;
- err = phy_init_hw(eth->netdev[i]->phydev);
- if (err)
- dev_err(eth->dev, "%s: PHY init failed.\n",
- eth->netdev[i]->name);
- }
-
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
@@ -2164,9 +2560,7 @@ static int mtk_get_link_ksettings(struct net_device *ndev,
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}
static int mtk_set_link_ksettings(struct net_device *ndev,
@@ -2177,7 +2571,7 @@ static int mtk_set_link_ksettings(struct net_device *ndev,
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -2211,22 +2605,10 @@ static int mtk_nway_reset(struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return genphy_restart_aneg(dev->phydev);
-}
+ if (!mac->phylink)
+ return -ENOTSUPP;
-static u32 mtk_get_link(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
- return -EBUSY;
-
- err = genphy_update_link(dev->phydev);
- if (err)
- return ethtool_op_get_link(dev);
-
- return dev->phydev->link;
+ return phylink_ethtool_nway_reset(mac->phylink);
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2346,7 +2728,7 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
.nway_reset = mtk_nway_reset,
- .get_link = mtk_get_link,
+ .get_link = ethtool_op_get_link,
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
@@ -2374,9 +2756,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
- struct mtk_mac *mac;
const __be32 *_id = of_get_property(np, "reg", NULL);
- int id, err;
+ struct phylink *phylink;
+ int phy_mode, id, err;
+ struct mtk_mac *mac;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2420,18 +2803,44 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+ /* phylink create */
+ phy_mode = of_get_phy_mode(np);
+ if (phy_mode < 0) {
+ dev_err(eth->dev, "incorrect phy-mode\n");
+ err = -EINVAL;
+ goto free_netdev;
+ }
+
+ /* mac config is not set */
+ mac->interface = PHY_INTERFACE_MODE_NA;
+ mac->mode = MLO_AN_PHY;
+ mac->speed = SPEED_UNKNOWN;
+
+ mac->phylink_config.dev = &eth->netdev[id]->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(mac->of_node),
+ phy_mode, &mtk_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto free_netdev;
+ }
+
+ mac->phylink = phylink;
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
- eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ eth->netdev[id]->hw_features = eth->soc->hw_features;
if (eth->hwlro)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
- eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
+ eth->netdev[id]->vlan_features = eth->soc->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= MTK_HW_FEATURES;
+ eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
@@ -2446,11 +2855,9 @@ free_netdev:
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device_node *mac_np;
struct mtk_eth *eth;
- int err;
- int i;
+ int err, i;
eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
if (!eth)
@@ -2459,19 +2866,36 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
- eth->base = devm_ioremap_resource(&pdev->dev, res);
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+ } else {
+ eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ eth->ip_align = NET_IP_ALIGN;
+ } else {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+ }
+
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys)) {
- dev_err(&pdev->dev, "no ethsys regmap found\n");
- return PTR_ERR(eth->ethsys);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,ethsys");
+ if (IS_ERR(eth->ethsys)) {
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
+ return PTR_ERR(eth->ethsys);
+ }
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
@@ -2572,9 +2996,12 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
goto err_free_dev;
- err = mtk_mdio_init(eth);
- if (err)
- goto err_free_dev;
+ /* No MT7628/88 support yet */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
@@ -2616,6 +3043,7 @@ err_deinit_hw:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ struct mtk_mac *mac;
int i;
/* stop all devices to make sure that dma is properly shut down */
@@ -2623,6 +3051,8 @@ static int mtk_remove(struct platform_device *pdev)
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
+ mac = netdev_priv(eth->netdev[i]);
+ phylink_disconnect_phy(mac->phylink);
}
mtk_hw_deinit(eth);
@@ -2637,12 +3067,14 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7621_data = {
.caps = MT7621_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
};
@@ -2650,12 +3082,14 @@ static const struct mtk_soc_data mt7621_data = {
static const struct mtk_soc_data mt7622_data = {
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
@@ -2663,16 +3097,25 @@ static const struct mtk_soc_data mt7623_data = {
static const struct mtk_soc_data mt7629_data = {
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
};
+static const struct mtk_soc_data rt5350_data = {
+ .caps = MT7628_CAPS,
+ .hw_features = MTK_HW_FEATURES_MT7628,
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index bab94f763e2c..76bd12cb8150 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -14,6 +14,7 @@
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
+#include <linux/phylink.h>
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
@@ -39,7 +40,8 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
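/* Editor's aside (worked example, not part of the patch): NEXT_DESP_IDX()
 * above assumes the ring size Y is a power of two, so the AND with (Y - 1)
 * acts as a cheap modulo:
 *
 *   NEXT_DESP_IDX(10, 256)  == 11
 *   NEXT_DESP_IDX(255, 256) == 0   (256 & 255, i.e. wrap to the start)
 */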
@@ -118,6 +120,7 @@
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
@@ -212,7 +215,7 @@
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
@@ -276,11 +279,18 @@
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
+#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
@@ -289,6 +299,7 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_FPORT_SHIFT 19
#define RX_DMA_FPORT_MASK 0x7
@@ -320,12 +331,19 @@
#define MAC_MCR_SPEED_100 BIT(2)
#define MAC_MCR_FORCE_DPX BIT(1)
#define MAC_MCR_FORCE_LINK BIT(0)
-#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
- MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
- MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
- MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+
+/* Mac status registers */
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G BIT(7)
+#define MAC_MSR_EEE100M BIT(6)
+#define MAC_MSR_RX_FC BIT(5)
+#define MAC_MSR_TX_FC BIT(4)
+#define MAC_MSR_SPEED_1000 BIT(3)
+#define MAC_MSR_SPEED_100 BIT(2)
+#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX BIT(1)
+#define MAC_MSR_LINK BIT(0)
/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL 0x10300
@@ -394,14 +412,38 @@
/* Register to auto-negotiation restart */
#define SGMSYS_PCS_CONTROL_1 0x0
#define SGMII_AN_RESTART BIT(9)
+#define SGMII_ISOLATE BIT(10)
+#define SGMII_AN_ENABLE BIT(12)
+#define SGMII_LINK_STATYS BIT(18)
+#define SGMII_AN_ABILITY BIT(19)
+#define SGMII_AN_COMPLETE BIT(21)
+#define SGMII_PCS_FAULT BIT(23)
+#define SGMII_AN_EXPANSION_CLR BIT(30)
/* Register to programmable link timer, the unit in 2 * 8ns */
#define SGMSYS_PCS_LINK_TIMER 0x18
#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
/* Register to control remote fault */
-#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMSYS_SGMII_MODE 0x20
+#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_SPEED_DUPLEX_AN BIT(1)
+#define SGMII_SPEED_10 0x0
+#define SGMII_SPEED_100 BIT(2)
+#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_IF_MODE_BIT5 BIT(5)
+#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMII_CODE_SYNC_SET_VAL BIT(9)
+#define SGMII_CODE_SYNC_SET_EN BIT(10)
+#define SGMII_SEND_AN_ERROR_EN BIT(11)
+#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III */
+#define SGMSYS_ANA_RG_CS3 0x2028
+#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
+#define RG_PHY_SPEED_1_25G 0x0
+#define RG_PHY_SPEED_3_125G BIT(2)
/* Register to power up QPHY */
#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
@@ -412,6 +454,19 @@
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* MT7628/88 specific stuff */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -509,6 +564,7 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
@@ -563,6 +619,10 @@ struct mtk_tx_ring {
struct mtk_tx_dma *last_free;
u16 thresh;
atomic_t free_count;
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ dma_addr_t phys_pdma;
+ int cpu_idx;
};
/* PDMA rx ring mode */
@@ -604,6 +664,8 @@ enum mkt_eth_capabilities {
MTK_HWLRO_BIT,
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_SOC_MT7628_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -634,6 +696,8 @@ enum mkt_eth_capabilities {
#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -687,26 +751,31 @@ enum mkt_eth_capabilities {
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
- MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK)
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
MTK_MUX_GDM1_TO_GMAC1_ESW | \
- MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
-#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
+
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
MTK_MUX_U3_GMAC2_TO_QPHY | \
- MTK_MUX_GMAC12_TO_GEPHY_SGMII)
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
/* struct mtk_eth_data - This is the structure holding all differences
 * among various platforms
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags shown the extra capability for the SoC
+ * @hw_features		Flags showing HW features
* @required_clks Flags shown the bitmap for required clocks on
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
@@ -717,6 +786,7 @@ struct mtk_soc_data {
u32 caps;
u32 required_clks;
bool required_pctl;
+ netdev_features_t hw_features;
};
/* currently no SoC has more than 2 macs */
@@ -810,27 +880,33 @@ struct mtk_eth {
unsigned long state;
const struct mtk_soc_data *soc;
+
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
* SoC
* @id: The number of the MAC
- * @ge_mode: Interface mode kept for setup restoring
+ * @interface: Interface mode kept for detecting change in hw settings
* @of_node: Our devicetree node
* @hw: Backpointer to our main datastruture
* @hw_stats: Packet statistics counter
- * @trgmii Indicate if the MAC uses TRGMII connected to internal
- switch
*/
struct mtk_mac {
int id;
- int ge_mode;
+ phy_interface_t interface;
+ unsigned int mode;
+ int speed;
struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
- bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
@@ -845,7 +921,12 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
-int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state);
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index ff509d42d818..4db27dfc7ec1 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -16,8 +16,7 @@
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
{
struct device_node *np;
- const char *str;
- int i, err;
+ int i;
ss->ana_rgc3 = ana_rgc3;
@@ -29,19 +28,6 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
ss->regmap[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
-
- err = of_property_read_string(np, "mediatek,physpeed", &str);
- if (err)
- return err;
-
- if (!strcmp(str, "2500"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
- else if (!strcmp(str, "1000"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
- else if (!strcmp(str, "auto"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
- else
- return -EINVAL;
}
return 0;
@@ -73,27 +59,45 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
return 0;
}
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state)
{
unsigned int val;
- int mode;
if (!ss->regmap[id])
return -EINVAL;
regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
- val &= ~GENMASK(3, 2);
- mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
- val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
+ val &= ~RG_PHY_SPEED_MASK;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ val |= RG_PHY_SPEED_3_125G;
regmap_write(ss->regmap[id], ss->ana_rgc3, val);
/* Disable SGMII AN */
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
- val &= ~BIT(12);
+ val &= ~SGMII_AN_ENABLE;
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
/* SGMII force mode setting */
- val = 0x31120019;
+ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ val &= ~SGMII_IF_MODE_MASK;
+
+ switch (state->speed) {
+ case SPEED_10:
+ val |= SGMII_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= SGMII_SPEED_100;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ val |= SGMII_SPEED_1000;
+ break;
+	}
+
+ if (state->duplex == DUPLEX_FULL)
+ val |= SGMII_DUPLEX_FULL;
+
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
/* Release PHYA power down state */
@@ -103,3 +107,20 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
return 0;
}
+
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+{
+ struct mtk_sgmii *ss = eth->sgmii;
+ unsigned int val, sid;
+
+	/* Decide how GMAC and SGMIISYS are mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac_id;
+
+ if (!ss->regmap[sid])
+ return;
+
+ regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+ val |= SGMII_AN_RESTART;
+ regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 88316c743820..eaf08f7ad128 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -99,8 +99,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
- cr_res_size, crspace_data,
- id, &kvfree);
+ crspace_data, id, &kvfree);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -139,9 +138,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
- HEALTH_BUFFER_SIZE,
- health_data,
- id, &kvfree);
+ health_data, id, &kvfree);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c1438ae52a11..40ec5acf79c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2645,14 +2645,6 @@ out:
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
return;
}
-
- /* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2660,14 +2652,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
- /* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL);
-
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
if (ret)
@@ -3415,6 +3399,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->LSO_support)
dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (mdev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ }
+
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
@@ -3483,16 +3484,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
-
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 36a92b19e613..4d5ca302c067 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
/* Map fragments if any */
for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
+ const skb_frag_t *frag = &shinfo->frags[i_frag];
byte_count = skb_frag_size(frag);
dma = skb_frag_dma_map(ddev, frag,
0, byte_count,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1f6e16d5ea6b..07c204bd3fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2292,23 +2292,31 @@ static int mlx4_init_fw(struct mlx4_dev *dev)
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_init_hca_param *init_hca = NULL;
+ struct mlx4_dev_cap *dev_cap = NULL;
struct mlx4_adapter adapter;
- struct mlx4_dev_cap dev_cap;
struct mlx4_profile profile;
- struct mlx4_init_hca_param init_hca;
u64 icm_size;
struct mlx4_config_dev_params params;
int err;
if (!mlx4_is_slave(dev)) {
- err = mlx4_dev_cap(dev, &dev_cap);
+ dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+ init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
+
+ if (!dev_cap || !init_hca) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = mlx4_dev_cap(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- return err;
+ goto out_free;
}
- choose_steering_mode(dev, &dev_cap);
- choose_tunnel_offload_mode(dev, &dev_cap);
+ choose_steering_mode(dev, dev_cap);
+ choose_tunnel_offload_mode(dev, dev_cap);
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
mlx4_is_master(dev))
@@ -2331,48 +2339,48 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
MLX4_STEERING_MODE_DEVICE_MANAGED)
profile.num_mcg = MLX4_FS_NUM_MCG;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
- &init_hca);
+ icm_size = mlx4_make_profile(dev, &profile, dev_cap,
+ init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- return err;
+ goto out_free;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
if (enable_4k_uar || !dev->persist->num_vfs) {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
- init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+ init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
} else {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
- init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca->uar_page_sz = PAGE_SHIFT - 12;
}
- init_hca.mw_enabled = 0;
+ init_hca->mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
- init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
+ init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
if (err)
- return err;
+ goto out_free;
- err = mlx4_INIT_HCA(dev, &init_hca);
+ err = mlx4_INIT_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
- if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
- err = mlx4_query_func(dev, &dev_cap);
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_query_func(dev, dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
- dev->caps.num_eqs = dev_cap.max_eqs;
- dev->caps.reserved_eqs = dev_cap.reserved_eqs;
- dev->caps.reserved_uars = dev_cap.reserved_uars;
+ dev->caps.num_eqs = dev_cap->max_eqs;
+ dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.reserved_uars = dev_cap->reserved_uars;
}
}
@@ -2381,14 +2389,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
* read HCA frequency by QUERY_HCA command
*/
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
- memset(&init_hca, 0, sizeof(init_hca));
- err = mlx4_QUERY_HCA(dev, &init_hca);
+ err = mlx4_QUERY_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
- init_hca.hca_core_clock;
+ init_hca->hca_core_clock;
}
/* In case we got HCA frequency 0 - disable timestamping
@@ -2464,7 +2471,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
- return 0;
+ err = 0;
+ goto out_free;
unmap_bf:
unmap_internal_clock(dev);
@@ -2483,6 +2491,10 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
+out_free:
+ kfree(dev_cap);
+ kfree(init_hca);
+
return err;
}
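
The mlx4_init_hca() change above moves two large query structures off the kernel stack and funnels every exit through a single out_free label. A minimal sketch of that allocate-and-unwind pattern, using hypothetical foo_* names rather than the mlx4 symbols:

    #include <linux/slab.h>

    /* Hypothetical stand-ins, not mlx4 symbols. */
    struct foo_caps { u64 flags; u32 max_qps; /* ...large... */ };
    static int foo_query_caps(struct foo_dev *dev, struct foo_caps *caps);
    static int foo_setup(struct foo_dev *dev, const struct foo_caps *caps);

    static int foo_init(struct foo_dev *dev)
    {
    	struct foo_caps *caps;
    	int err;

    	caps = kzalloc(sizeof(*caps), GFP_KERNEL);	/* keep it off the stack */
    	if (!caps)
    		return -ENOMEM;

    	err = foo_query_caps(dev, caps);
    	if (err)
    		goto out_free;				/* one exit path frees it */

    	err = foo_setup(dev, caps);

    out_free:
    	kfree(caps);					/* kfree(NULL) is a no-op */
    	return err;
    }

Because kfree(NULL) is safe, the shared label stays valid even when a failure happens before every buffer has been allocated.
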
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 57d2cc666fe3..f4de9ccb5df1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -23,8 +23,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
- en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en_selftest.o en/port.o en/monitor_stats.o en/health.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
#
# Netdev extra
@@ -34,7 +35,8 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
- en/tc_tun_geneve.o
+ en/tc_tun_geneve.o diag/en_tc_tracepoint.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
#
# Core extra
@@ -44,6 +46,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
#
# Ipoib netdev
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8cdd7e66f8df..ea934cd02448 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_UMEM:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
MLX5_COMMAND_STR_CASE(CREATE_UMEM);
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
+ MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
+ MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
default: return "unknown command opcode";
}
}
@@ -1368,49 +1372,19 @@ static void clean_debug_files(struct mlx5_core_dev *dev)
debugfs_remove_recursive(dbg->dbg_root);
}
-static int create_debugfs_files(struct mlx5_core_dev *dev)
+static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- int err = -ENOMEM;
-
- if (!mlx5_debugfs_root)
- return 0;
dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
- if (!dbg->dbg_root)
- return err;
-
- dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_in)
- goto err_dbg;
- dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_out)
- goto err_dbg;
-
- dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
- dev, &olfops);
- if (!dbg->dbg_outlen)
- goto err_dbg;
-
- dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
- &dbg->status);
- if (!dbg->dbg_status)
- goto err_dbg;
-
- dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
- if (!dbg->dbg_run)
- goto err_dbg;
+ debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
+ debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
+ debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
mlx5_cmdif_debugfs_init(dev);
-
- return 0;
-
-err_dbg:
- clean_debug_files(dev);
- return err;
}
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
@@ -2007,17 +1981,10 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_cache;
}
- err = create_debugfs_files(dev);
- if (err) {
- err = -ENOMEM;
- goto err_wq;
- }
+ create_debugfs_files(dev);
return 0;
-err_wq:
- destroy_workqueue(cmd->wq);
-
err_cache:
destroy_msg_cache(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index a11e22d0b0cc..04854e5fbcd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -92,8 +92,6 @@ EXPORT_SYMBOL(mlx5_debugfs_root);
void mlx5_register_debugfs(void)
{
mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
- if (IS_ERR_OR_NULL(mlx5_debugfs_root))
- mlx5_debugfs_root = NULL;
}
void mlx5_unregister_debugfs(void)
@@ -101,45 +99,25 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
atomic_set(&dev->num_qps, 0);
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
- if (!dev->priv.qp_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
- if (!dev->priv.eq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.eq_debugfs);
}
@@ -183,85 +161,41 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
struct dentry **cmd;
const char *namep;
- int err;
int i;
- if (!mlx5_debugfs_root)
- return 0;
-
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
- if (!*cmd)
- return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats->root = debugfs_create_dir(namep, *cmd);
- if (!stats->root) {
- mlx5_core_warn(dev, "failed adding command %d\n",
- i);
- err = -ENOMEM;
- goto out;
- }
-
- stats->avg = debugfs_create_file("average", 0400,
- stats->root, stats,
- &stats_fops);
- if (!stats->avg) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
-
- stats->count = debugfs_create_u64("n", 0400,
- stats->root,
- &stats->n);
- if (!stats->count) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
+
+ debugfs_create_file("average", 0400, stats->root, stats,
+ &stats_fops);
+ debugfs_create_u64("n", 0400, stats->root, &stats->n);
}
}
-
- return 0;
-out:
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
- return err;
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
- if (!dev->priv.cq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cq_debugfs);
}
@@ -484,7 +418,6 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
{
struct mlx5_rsc_debug *d;
char resn[32];
- int err;
int i;
d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
@@ -496,30 +429,15 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
d->type = type;
sprintf(resn, "0x%x", rsn);
d->root = debugfs_create_dir(resn, root);
- if (!d->root) {
- err = -ENOMEM;
- goto out_free;
- }
for (i = 0; i < nfile; i++) {
d->fields[i].i = i;
- d->fields[i].dent = debugfs_create_file(field[i], 0400,
- d->root, &d->fields[i],
- &fops);
- if (!d->fields[i].dent) {
- err = -ENOMEM;
- goto out_rem;
- }
+ debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
+ &fops);
}
*dbg = d;
return 0;
-out_rem:
- debugfs_remove_recursive(d->root);
-
-out_free:
- kfree(d);
- return err;
}
static void rem_res_tree(struct mlx5_rsc_debug *d)
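
The cmd.c and debugfs.c hunks above apply the same upstream convention: debugfs creation failures are never fatal, so the dentry return values are ignored, the init helpers become void, and cleanup relies on debugfs_remove_recursive() of the directory root. A hedged sketch of the resulting shape, with generic foo_* names:

    #include <linux/debugfs.h>

    struct foo_dev {
    	struct dentry *dbg_root;
    	u64 count;
    };

    extern const struct file_operations foo_state_fops;	/* assumed defined elsewhere */

    static void foo_debugfs_init(struct foo_dev *dev)
    {
    	/* No error checks: a failed create hands back an error pointer that
    	 * later debugfs_create_*() calls and debugfs_remove_recursive()
    	 * tolerate, so nothing useful is gained by unwinding here.
    	 */
    	dev->dbg_root = debugfs_create_dir("foo", NULL);
    	debugfs_create_file("state", 0400, dev->dbg_root, dev, &foo_state_fops);
    	debugfs_create_u64("count", 0400, dev->dbg_root, &dev->count);
    }

    static void foo_debugfs_cleanup(struct foo_dev *dev)
    {
    	debugfs_remove_recursive(dev->dbg_root);
    }
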
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
new file mode 100644
index 000000000000..1177860a2ee4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_EN_REP_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_EN_REP_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "en_rep.h"
+
+TRACE_EVENT(mlx5e_rep_neigh_update,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
+ bool neigh_connected),
+ TP_ARGS(nhe, ha, neigh_connected),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, ha, ETH_ALEN)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_connected)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_connected = neigh_connected;
+ memcpy(__entry->ha, ha, ETH_ALEN);
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s MAC: %pM IPv4: %pI4 IPv6: %pI6c neigh_connected=%d\n",
+ __get_str(devname), __entry->ha,
+ __entry->v4, __entry->v6, __entry->neigh_connected
+ )
+);
+
+#endif /* _MLX5_EN_REP_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_rep_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
new file mode 100644
index 000000000000..c5dc6c50fa87
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#define CREATE_TRACE_POINTS
+#include "en_tc_tracepoint.h"
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ ids[i] = entries[i].id;
+}
+
+#define NAME_SIZE 16
+
+static const char FLOWACT2STR[NUM_FLOW_ACTIONS][NAME_SIZE] = {
+ [FLOW_ACTION_ACCEPT] = "ACCEPT",
+ [FLOW_ACTION_DROP] = "DROP",
+ [FLOW_ACTION_TRAP] = "TRAP",
+ [FLOW_ACTION_GOTO] = "GOTO",
+ [FLOW_ACTION_REDIRECT] = "REDIRECT",
+ [FLOW_ACTION_MIRRED] = "MIRRED",
+ [FLOW_ACTION_VLAN_PUSH] = "VLAN_PUSH",
+ [FLOW_ACTION_VLAN_POP] = "VLAN_POP",
+ [FLOW_ACTION_VLAN_MANGLE] = "VLAN_MANGLE",
+ [FLOW_ACTION_TUNNEL_ENCAP] = "TUNNEL_ENCAP",
+ [FLOW_ACTION_TUNNEL_DECAP] = "TUNNEL_DECAP",
+ [FLOW_ACTION_MANGLE] = "MANGLE",
+ [FLOW_ACTION_ADD] = "ADD",
+ [FLOW_ACTION_CSUM] = "CSUM",
+ [FLOW_ACTION_MARK] = "MARK",
+ [FLOW_ACTION_WAKE] = "WAKE",
+ [FLOW_ACTION_QUEUE] = "QUEUE",
+ [FLOW_ACTION_SAMPLE] = "SAMPLE",
+ [FLOW_ACTION_POLICE] = "POLICE",
+ [FLOW_ACTION_CT] = "CT",
+};
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ if (ids[i] < NUM_FLOW_ACTIONS)
+ trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]);
+ else
+ trace_seq_printf(p, "UNKNOWN ");
+ }
+
+ trace_seq_putc(p, 0);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
new file mode 100644
index 000000000000..d4e6cfaaade3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_TC_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_TC_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <net/flow_offload.h>
+#include "en_rep.h"
+
+#define __parse_action(ids, num) parse_action(p, ids, num)
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num);
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num);
+
+DECLARE_EVENT_CLASS(mlx5e_flower_template,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(unsigned int, num)
+ __dynamic_array(int, ids, f->rule ?
+ f->rule->action.num_entries : 0)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->num = (f->rule ?
+ f->rule->action.num_entries : 0);
+ if (__entry->num)
+ put_ids_to_array(__get_dynamic_array(ids),
+ f->rule->action.entries,
+ f->rule->action.num_entries);
+ ),
+ TP_printk("cookie=%p actions= %s\n",
+ __entry->cookie, __entry->num ?
+ __parse_action(__get_dynamic_array(ids),
+ __entry->num) : "NULL"
+ )
+);
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_configure_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_delete_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+TRACE_EVENT(mlx5e_stats_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(u64, bytes)
+ __field(u64, packets)
+ __field(u64, lastused)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->bytes = f->stats.bytes;
+ __entry->packets = f->stats.pkts;
+ __entry->lastused = f->stats.lastused;
+ ),
+ TP_printk("cookie=%p bytes=%llu packets=%llu lastused=%llu\n",
+ __entry->cookie, __entry->bytes,
+ __entry->packets, __entry->lastused
+ )
+);
+
+TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
+ TP_ARGS(nhe, neigh_used),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_used)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_used = neigh_used;
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s IPv4: %pI4 IPv6: %pI6c neigh_used=%d\n",
+ __get_str(devname), __entry->v4, __entry->v6,
+ __entry->neigh_used
+ )
+);
+
+#endif /* _MLX5_TC_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_tc_tracepoint
+#include <trace/define_trace.h>
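
The TRACE_EVENT/DEFINE_EVENT definitions above generate trace_mlx5e_rep_neigh_update(), trace_mlx5e_configure_flower(), trace_mlx5e_delete_flower(), trace_mlx5e_stats_flower() and trace_mlx5e_tc_update_neigh_used_value(). A call site only includes the header and invokes the generated hook; a hedged sketch follows (the surrounding function is illustrative, not part of this patch):

    #include "diag/en_tc_tracepoint.h"

    static int foo_configure_flower(struct mlx5e_priv *priv,
    				struct flow_cls_offload *f)
    {
    	trace_mlx5e_configure_flower(f);	/* static-key no-op unless the event is enabled */

    	/* ... actual flower offload work would go here ... */
    	return 0;
    }

Because TRACE_SYSTEM is mlx5, the events appear under <tracefs>/events/mlx5/ and can be enabled per event at runtime.
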
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 8a4930c8bf62..2011eaf15cc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
trace_data->timestamp = timestamp;
trace_data->lost = lost;
trace_data->event_id = event_id;
- strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+ strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG);
tracer->st_arr.saved_traces_index =
(tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);
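
The fw_tracer change swaps strncpy() for strscpy_pad(). strncpy() zero-pads but can leave the destination without a NUL terminator when the source is too long; strscpy() terminates but does not pad; strscpy_pad() both terminates and zero-fills the tail, which suits a fixed-size record that is copied out later. A small illustrative comparison (the src buffer is assumed):

    #include <linux/string.h>

    char msg[TRACE_STR_MSG];

    /* May leave msg unterminated if strlen(src) >= sizeof(msg). */
    strncpy(msg, src, sizeof(msg));

    /* Always NUL-terminates, zero-pads the remainder, and reports truncation. */
    if (strscpy_pad(msg, src, sizeof(msg)) == -E2BIG)
    	pr_debug("trace message truncated\n");
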
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65bec19a438f..8d76452cacdc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"
+#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -162,6 +163,14 @@ enum mlx5e_rq_group {
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
+static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
+{
+ if (mlx5_lag_is_lacp_owner(mdev))
+ return 1;
+
+ return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
+}
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -300,6 +309,7 @@ struct mlx5e_dcbx_dp {
enum {
MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_RECOVERING,
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
@@ -356,6 +366,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
};
struct mlx5e_sq_wqe_info {
@@ -480,8 +491,6 @@ struct mlx5e_xdp_mpwqe {
struct mlx5e_tx_wqe *wqe;
u8 ds_count;
u8 pkt_count;
- u8 max_ds_count;
- u8 complete;
u8 inline_on;
};
@@ -552,6 +561,8 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+
+ struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
@@ -671,6 +682,8 @@ struct mlx5e_rq {
struct zero_copy_allocator zca;
struct xdp_umem *umem;
+ struct work_struct recover_work;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
__be32 mkey_be;
@@ -700,6 +713,7 @@ struct mlx5e_channel {
struct net_device *netdev;
__be32 mkey_be;
u8 num_tc;
+ u8 lag_port;
/* XDP_REDIRECT */
struct mlx5e_xdpsq xdpsq;
@@ -778,6 +792,15 @@ struct mlx5e_modify_sq_param {
int rl_index;
};
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+struct mlx5e_hv_vhca_stats_agent {
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work work;
+ u16 delay;
+ void *buf;
+};
+#endif
+
struct mlx5e_xsk {
/* UMEMs are stored separately from channels, because we don't want to
* lose them when channels are recreated. The kernel also stores UMEMs,
@@ -804,7 +827,7 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -847,7 +870,11 @@ struct mlx5e_priv {
struct mlx5e_tls *tls;
#endif
struct devlink_health_reporter *tx_reporter;
+ struct devlink_health_reporter *rx_reporter;
struct mlx5e_xsk xsk;
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+ struct mlx5e_hv_vhca_stats_agent stats_agent;
+#endif
};
struct mlx5e_profile {
@@ -888,6 +915,26 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ }
+}
+
+static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return rq->mpwqe.wq.cur_sz;
+ default:
+ return rq->wqe.wq.cur_sz;
+ }
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -1006,18 +1053,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+void mlx5e_activate_rq(struct mlx5e_rq *rq);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
-static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
-{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
-}
-
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1063,6 +1110,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
@@ -1135,7 +1183,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
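
en.h above widens priv->tisn to a per-LAG-port array, adds mlx5e_get_num_lag_ports() and records a lag_port per channel. The TIS-creation side is not in these hunks; under the assumption that it simply iterates the new array, the natural shape would be roughly:

    /* Speculative sketch only: foo_create_tises() is not taken from this patch. */
    static int foo_create_tises(struct mlx5e_priv *priv)
    {
    	int i, tc, err;

    	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
    		for (tc = 0; tc < priv->profile->max_tc; tc++) {
    			u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
    			void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

    			MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
    			MLX5_SET(tisc, tisc, prio, tc << 1);

    			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
    			if (err)
    				return err;	/* unwinding earlier TISes omitted */
    		}
    	}
    	return 0;
    }

Each channel would then presumably pick its SQ's TIS from priv->tisn[c->lag_port][tc].
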
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index be5961ff24cc..68d593074f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -10,11 +10,14 @@ enum {
};
struct mlx5e_tc_table {
+ /* protects flow table */
+ struct mutex t_lock;
struct mlx5_flow_table *t;
struct rhashtable ht;
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
@@ -92,9 +95,15 @@ struct mlx5e_tirc_config {
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
+ MLX5E_TT_IPV4_IPIP,
+ MLX5E_TT_IPV6_IPIP,
+ MLX5E_TT_IPV4_IPV6,
+ MLX5E_TT_IPV6_IPV6,
MLX5E_NUM_TUNNEL_TT,
};
+bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
+
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
@@ -132,12 +141,17 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
@@ -224,5 +238,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type);
+bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev);
+
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
new file mode 100644
index 000000000000..1d6b58860da6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "lib/eq.h"
+
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = cq->channel->priv;
+ u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
+ u8 hw_status;
+ void *cqc;
+ int err;
+
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ if (err)
+ return err;
+
+ cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
+ hw_status = MLX5_GET(cqc, cqc, status);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ u8 cq_log_stride;
+ u32 cq_sz;
+ int err;
+
+ cq_sz = mlx5_cqwq_get_size(&cq->wq);
+ cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_reporter_tx_create(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_rx_create(priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv)
+{
+ mlx5e_reporter_rx_destroy(priv);
+ mlx5e_reporter_tx_destroy(priv);
+}
+
+void mlx5e_health_channels_update(struct mlx5e_priv *priv)
+{
+ if (priv->tx_reporter)
+ devlink_health_reporter_state_update(priv->tx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ if (priv->rx_reporter)
+ devlink_health_reporter_state_update(priv->rx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn)
+{
+ struct mlx5_core_dev *mdev = channel->mdev;
+ struct net_device *dev = channel->netdev;
+ struct mlx5e_modify_sq_param msp = {};
+ int err;
+
+ msp.curr_state = MLX5_SQC_STATE_ERR;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
+{
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+
+ return err;
+}
+
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel)
+{
+ u32 eqe_count;
+
+ netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return -EIO;
+
+ netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n",
+ eqe_count, eq->core.eqn);
+
+ channel->stats->eq_rearm++;
+ return 0;
+}
+
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx)
+{
+ if (!reporter) {
+ netdev_err(priv->netdev, err_str);
+ return err_ctx->recover(&err_ctx->ctx);
+ }
+ return devlink_health_report(reporter, err_str, err_ctx);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
new file mode 100644
index 000000000000..d3693fa547ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5E_EN_HEALTH_H
+#define __MLX5E_EN_HEALTH_H
+
+#include "en.h"
+
+#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+
+static inline bool cqe_syndrome_needs_recover(u8 syndrome)
+{
+ return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+}
+
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg);
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+
+#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
+
+struct mlx5e_err_ctx {
+ int (*recover)(void *ctx);
+ void *ctx;
+};
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn);
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel);
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx);
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_channels_update(struct mlx5e_priv *priv);
+
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
new file mode 100644
index 000000000000..c37b4acd9bd5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include "en.h"
+#include "en/hv_vhca_stats.h"
+#include "lib/hv_vhca.h"
+#include "lib/hv.h"
+
+struct mlx5e_hv_vhca_per_ring_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+static void
+mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
+ struct mlx5e_hv_vhca_per_ring_stats *data)
+{
+ struct mlx5e_channel_stats *stats;
+ int tc;
+
+ stats = &priv->channel_stats[ch];
+ data->rx_packets = stats->rq.packets;
+ data->rx_bytes = stats->rq.bytes;
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ data->tx_packets += stats->sq[tc].packets;
+ data->tx_bytes += stats->sq[tc].bytes;
+ }
+}
+
+static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int buf_len)
+{
+ int ch, i = 0;
+
+ for (ch = 0; ch < priv->max_nch; ch++) {
+ u64 *buf = data + i;
+
+ if (WARN_ON_ONCE(buf +
+ sizeof(struct mlx5e_hv_vhca_per_ring_stats) >
+ data + buf_len))
+ return;
+
+ mlx5e_hv_vhca_fill_ring_stats(priv, ch,
+ (struct mlx5e_hv_vhca_per_ring_stats *)buf);
+ i += sizeof(struct mlx5e_hv_vhca_per_ring_stats) / sizeof(u64);
+ }
+}
+
+static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
+{
+ return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
+ priv->max_nch);
+}
+
+static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work *dwork;
+ struct mlx5e_priv *priv;
+ int buf_len, rc;
+ void *buf;
+
+ dwork = to_delayed_work(work);
+ sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work);
+ priv = container_of(sagent, struct mlx5e_priv, stats_agent);
+ buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ agent = sagent->agent;
+ buf = sagent->buf;
+
+ memset(buf, 0, buf_len);
+ mlx5e_hv_vhca_fill_stats(priv, buf, buf_len);
+
+ rc = mlx5_hv_vhca_agent_write(agent, buf, buf_len);
+ if (rc) {
+ mlx5_core_err(priv->mdev,
+ "%s: Failed to write stats, err = %d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (sagent->delay)
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+enum {
+ MLX5_HV_VHCA_STATS_VERSION = 1,
+ MLX5_HV_VHCA_STATS_UPDATE_ONCE = 0xFFFF,
+};
+
+static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5e_priv *priv;
+
+ priv = mlx5_hv_vhca_agent_priv(agent);
+ sagent = &priv->stats_agent;
+
+ block->version = MLX5_HV_VHCA_STATS_VERSION;
+ block->rings = priv->max_nch;
+
+ if (!block->command) {
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+ return;
+ }
+
+ sagent->delay = block->command == MLX5_HV_VHCA_STATS_UPDATE_ONCE ? 0 :
+ msecs_to_jiffies(block->command * 100);
+
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5e_priv *priv = mlx5_hv_vhca_agent_priv(agent);
+
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+}
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ int buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ struct mlx5_hv_vhca_agent *agent;
+
+ priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL);
+ if (!priv->stats_agent.buf)
+ return -ENOMEM;
+
+ agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
+ MLX5_HV_VHCA_AGENT_STATS,
+ mlx5e_hv_vhca_stats_control, NULL,
+ mlx5e_hv_vhca_stats_cleanup,
+ priv);
+
+ if (IS_ERR_OR_NULL(agent)) {
+ if (IS_ERR(agent))
+ netdev_warn(priv->netdev,
+ "Failed to create hv vhca stats agent, err = %ld\n",
+ PTR_ERR(agent));
+
+ kfree(priv->stats_agent.buf);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+ priv->stats_agent.agent = agent;
+ INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
+
+ return 0;
+}
+
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->stats_agent.agent))
+ return;
+
+ mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
+ kfree(priv->stats_agent.buf);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
new file mode 100644
index 000000000000..664463faf77b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_STATS_VHCA_H__
+#define __MLX5_EN_STATS_VHCA_H__
+#include "en.h"
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv);
+
+#else
+
+static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+}
+#endif
+
+#endif /* __MLX5_EN_STATS_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
deleted file mode 100644
index e78e92753d73..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5E_EN_REPORTER_H
-#define __MLX5E_EN_REPORTER_H
-
-#include <linux/mlx5/driver.h>
-#include "en.h"
-
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq);
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq);
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
new file mode 100644
index 000000000000..b860569d4247
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "params.h"
+
+static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ void *out;
+ void *rqc;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rq(dev, rqn, out);
+ if (err)
+ goto out;
+
+ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+ *state = MLX5_GET(rqc, rqc, state);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (icosq->cc == icosq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(icosq->channel->netdev,
+ "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
+{
+ WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+ icosq->cc = 0;
+ icosq->pc = 0;
+}
+
+static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_icosq *icosq;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ icosq = ctx;
+ rq = &icosq->channel->rq;
+ mdev = icosq->channel->mdev;
+ dev = icosq->channel->netdev;
+ err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n",
+ icosq->sqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ err = mlx5e_wait_for_icosq_flush(icosq);
+ if (err)
+ goto out;
+
+ mlx5e_deactivate_icosq(icosq);
+
+ /* At this point, both the rq and the icosq are disabled */
+
+ err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn);
+ if (err)
+ goto out;
+
+ mlx5e_reset_icosq_cc_pc(icosq);
+ mlx5e_free_rx_descs(rq);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mlx5e_activate_icosq(icosq);
+ mlx5e_activate_rq(rq);
+
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ return err;
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ rq = ctx;
+ mdev = rq->mdev;
+ dev = rq->netdev;
+ err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n",
+ rq->rqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_RQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ mlx5e_free_rx_descs(rq);
+
+ err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ if (err)
+ goto out;
+
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ mlx5e_activate_rq(rq);
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ return err;
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_timeout_recover(void *ctx)
+{
+ struct mlx5e_icosq *icosq;
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_rq *rq;
+ int err;
+
+ rq = ctx;
+ icosq = &rq->channel->icosq;
+ eq = rq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, rq->channel);
+ if (err)
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+
+ return err;
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
+{
+ return err_ctx->recover(err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
+ void *context)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) :
+ mlx5e_health_recover_channels(priv);
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ struct mlx5e_params *params;
+ struct mlx5e_icosq *icosq;
+ u8 icosq_hw_state;
+ int wqes_sz;
+ u8 hw_state;
+ u16 wq_head;
+ int err;
+
+ params = &priv->channels.params;
+ icosq = &rq->channel->icosq;
+ err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
+ if (err)
+ return err;
+
+ err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
+ if (err)
+ return err;
+
+ wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
+ wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_rq *generic_rq;
+ u32 rq_stride, rq_sz;
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ generic_rq = &priv->channels.c[0]->rq;
+ rq_sz = mlx5e_rqwq_get_size(generic_rq);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
+ if (err)
+ goto unlock;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ goto unlock;
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
+ .name = "rx",
+ .recover = mlx5e_rx_reporter_recover,
+ .diagnose = mlx5e_rx_reporter_diagnose,
+};
+
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_create(devlink,
+ &mlx5_rx_reporter_ops,
+ MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ true, priv);
+ if (IS_ERR(reporter)) {
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->rx_reporter = reporter;
+ return 0;
+}
+
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
+{
+ if (!priv->rx_reporter)
+ return;
+
+ devlink_health_reporter_destroy(priv->rx_reporter);
+}
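
reporter_rx.c implements only the recover and diagnose sides; the datapath has to arm it. Combined with the recover_work member and MLX5E_RQ_STATE_RECOVERING flag added in en.h, the expected trigger path looks roughly like the sketch below (foo_* names are illustrative; the real RX hooks are not part of the hunks shown):

    /* Work handler wired to the new rq->recover_work member. */
    static void foo_rq_err_cqe_work(struct work_struct *recover_work)
    {
    	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq,
    					   recover_work);

    	mlx5e_reporter_rq_cqe_err(rq);
    }

    /* Called from the RX completion path on an error CQE. */
    static void foo_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
    {
    	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;

    	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
    	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
    		queue_work(rq->channel->priv->wq, &rq->recover_work);
    }

Since the reporter is created with auto-recover enabled, devlink_health_report() ends up invoking mlx5e_rx_reporter_recover() with the err_ctx, which dispatches to the queue-specific recover callback.
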
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c7f86453c638..bfed558637c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -1,16 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <net/devlink.h>
-#include "reporter.h"
-#include "lib/eq.h"
-
-#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256
-
-struct mlx5e_tx_err_ctx {
- int (*recover)(struct mlx5e_txqsq *sq);
- struct mlx5e_txqsq *sq;
-};
+#include "health.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -40,41 +31,20 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
-static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- struct mlx5e_modify_sq_param msp = {0};
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_txqsq *sq;
+ u8 state;
int err;
- msp.curr_state = curr_state;
- msp.next_state = MLX5_SQC_STATE_RST;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
- return err;
- }
-
- memset(&msp, 0, sizeof(msp));
- msp.curr_state = MLX5_SQC_STATE_RST;
- msp.next_state = MLX5_SQC_STATE_RDY;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
- return err;
- }
-
- return 0;
-}
+ sq = ctx;
+ mdev = sq->channel->mdev;
+ dev = sq->channel->netdev;
-static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
-{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- u8 state;
- int err;
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ return 0;
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
@@ -97,7 +67,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
* pending WQEs. SQ can safely reset the SQ.
*/
- err = mlx5e_sq_to_ready(sq, state);
+ err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
if (err)
goto out;
@@ -112,115 +82,98 @@ out:
return err;
}
-static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
- char *err_str,
- struct mlx5e_tx_err_ctx *err_ctx)
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{
- if (IS_ERR_OR_NULL(tx_reporter)) {
- netdev_err(err_ctx->sq->channel->netdev, err_str);
- return err_ctx->recover(err_ctx->sq);
- }
-
- return devlink_health_report(tx_reporter, err_str, err_ctx);
-}
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {0};
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
-{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx = {0};
-
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
- mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
-static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
- struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
- u32 eqe_count;
-
- netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_txqsq *sq;
+ int err;
- eqe_count = mlx5_eq_poll_irq_disabled(eq);
- if (!eqe_count) {
+ sq = ctx;
+ eq = sq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, sq->channel);
+ if (err)
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- return -EIO;
- }
- netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
- eqe_count, eq->core.eqn);
- sq->channel->stats->eq_rearm++;
- return 0;
+ return err;
}
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx;
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx;
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
sprintf(err_str,
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - sq->txq->trans_start));
- return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
/* state lock cannot be grabbed within this function.
* It can cause a dead lock or a read-after-free.
*/
-static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
-{
- return err_ctx->recover(err_ctx->sq);
-}
-
-static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
+static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
- int err = 0;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto out;
-
- err = mlx5e_safe_reopen_channels(priv);
-
-out:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-
- return err;
+ return err_ctx->recover(err_ctx->ctx);
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
void *context)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_tx_err_ctx *err_ctx = context;
+ struct mlx5e_err_ctx *err_ctx = context;
return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
- mlx5e_tx_reporter_recover_all(priv);
+ mlx5e_health_recover_channels(priv);
}
static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
- u32 sqn, u8 state, bool stopped)
+ struct mlx5e_txqsq *sq, int tc)
{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ bool stopped = netif_xmit_stopped(sq->txq);
+ u8 state;
int err;
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
if (err)
return err;
@@ -232,6 +185,18 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
@@ -243,31 +208,61 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- int i, err = 0;
+ struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ u32 sq_stride, sq_sz;
+
+ int i, tc, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
+ sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq);
+ sq_stride = MLX5_SEND_WQE_BB;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
if (err)
goto unlock;
- for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
- i++) {
- struct mlx5e_txqsq *sq = priv->txq2sq[i];
- u8 state;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
- if (err)
- goto unlock;
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
- state,
- netif_xmit_stopped(sq->txq));
- if (err)
- goto unlock;
+ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
+ if (err)
+ goto unlock;
+ }
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
@@ -286,25 +281,30 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_health_reporter *reporter;
struct mlx5_core_dev *mdev = priv->mdev;
- struct devlink *devlink = priv_to_devlink(mdev);
+ struct devlink *devlink;
- priv->tx_reporter =
+ devlink = priv_to_devlink(mdev);
+ reporter =
devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD,
true, priv);
- if (IS_ERR(priv->tx_reporter))
+ if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
"Failed to create tx reporter, err = %ld\n",
- PTR_ERR(priv->tx_reporter));
- return IS_ERR_OR_NULL(priv->tx_reporter);
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->tx_reporter = reporter;
+ return 0;
}
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->tx_reporter))
+ if (!priv->tx_reporter)
return;
devlink_health_reporter_destroy(priv->tx_reporter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a6a52806be45..4c4620db3d31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ /* mlx5_lag_is_sriov() is a blocking function which can't be called
+ * while holding the RCU read lock. Take a reference on the net_device
+ * for correctness' sake.
+ */
+ if (uplink_upper)
+ dev_hold(uplink_upper);
+ rcu_read_unlock();
+
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
+ if (uplink_upper)
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
*/
+ *route_dev = dev;
if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
- dst_is_lag_dev) {
- *route_dev = dev;
+ dst_is_lag_dev || is_vlan_dev(*route_dev))
*out_dev = uplink_dev;
- } else {
- *route_dev = dev;
- if (is_vlan_dev(*route_dev))
- *out_dev = uplink_dev;
- else if (mlx5e_eswitch_rep(dev) &&
- mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
- *out_dev = *route_dev;
- else
- return -EOPNOTSUPP;
- }
+ else if (mlx5e_eswitch_rep(dev) &&
+ mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
+ *out_dev = *route_dev;
+ else
+ return -EOPNOTSUPP;
if (!(mlx5e_eswitch_rep(*out_dev) &&
mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
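The dev_hold()/dev_put() pair above is the standard way to call a sleeping helper (here mlx5_lag_is_sriov()) on a device found under the RCU read lock: pin the device inside the RCU section, leave the section, then do the blocking work. A minimal sketch of the pattern; some_blocking_check() is a hypothetical stand-in for any function that may sleep:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

bool some_blocking_check(struct net_device *dev);	/* hypothetical, may sleep */

static bool upper_dev_blocking_check(struct net_device *lower)
{
	struct net_device *upper;
	bool ret = false;

	rcu_read_lock();
	upper = netdev_master_upper_dev_get_rcu(lower);
	if (upper)
		dev_hold(upper);	/* keep it alive past rcu_read_unlock() */
	rcu_read_unlock();

	if (upper) {
		ret = some_blocking_check(upper);	/* safe: no RCU lock held here */
		dev_put(upper);
	}
	return ret;
}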
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index ddfe19adb3d9..87be96747902 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,7 +6,7 @@
#include "en.h"
-#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
@@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
- return !!wqe->ctrl.tisn;
+ return cseg && !!cseg->tisn;
+}
+
+static inline u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct sk_buff *skb)
+{
+ u8 mode;
+
+ if (mlx5e_transport_inline_tx_wqe(cseg))
+ return MLX5_INLINE_MODE_TCP_UDP;
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b0b982cf69bb..1ed5c33e022f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -179,33 +179,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
- u8 wqebbs;
- u16 pi;
-
- mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
-
- prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
- session->complete = 0;
+ u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+ if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
+ mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
- wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
- MLX5E_XDP_MPW_MAX_WQEBBS);
+ session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
- session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+ prefetchw(session->wqe->data);
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->pkt_count = 0;
mlx5e_xdp_update_inline_state(sq);
@@ -244,7 +230,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5_SEND_WQE_MAX_WQEBBS))) {
+ MLX5E_XDPSQ_STOP_ROOM))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -285,8 +271,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(session->complete ||
- session->ds_count == session->max_ds_count))
+ if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
+ session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index b90923932668..36ac1e3816b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -40,6 +40,26 @@
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
+#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
+ DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
+
+/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_XDP_MPW_MAX_NUM_DS \
+ (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
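The cap above exists because the WQE control segment's DS count field is 6 bits wide, so one session can carry at most 63 data segments. A worked check of the numbers, assuming the usual 64-byte WQEBB split into four 16-byte data segments (the "16 * 4 == 64" in the comment):

#include <assert.h>

/* Illustrative values only; the real constants live in the mlx5 headers. */
enum {
	DS_PER_WQEBB = 4,	/* 64B WQEBB / 16B data segment */
	MAX_WQEBBS   = 16,	/* MLX5_SEND_WQE_MAX_WQEBBS */
	DS_CNT_MAX   = 63,	/* 6-bit DS count field in the ctrl segment */
};

static void check_mpwqe_ds_bound(void)
{
	assert(MAX_WQEBBS * DS_PER_WQEBB > DS_CNT_MAX);		/* 64 does not fit */
	assert((MAX_WQEBBS - 1) * DS_PER_WQEBB <= DS_CNT_MAX);	/* 60 fits: 64B cache lines */
	assert((MAX_WQEBBS - 2) * DS_PER_WQEBB <= DS_CNT_MAX);	/* 56 fits: 128B cache lines */
}

With MLX5E_XDP_MPW_MAX_WQEBBS of 15 or 14, MLX5E_XDP_MPW_MAX_NUM_DS therefore works out to 60 or 56 data segments per session.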
@@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
session->inline_on = 1;
}
+static inline bool
+mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+{
+ return session->inline_on &&
+ session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+}
+
+static inline void
+mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
+ u16 pi, u16 nnops)
+{
+ struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+
+ edge_wi = wi + nnops;
+ /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+ for (; wi < edge_wi; wi++) {
+ wi->num_wqebbs = 1;
+ wi->num_pkts = 0;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ sq->stats->nops += nnops;
+}
+
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
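mlx5e_fill_xdpsq_frag_edge() above pads the tail of a WQ fragment with NOPs so that an MPWQE session never straddles two pages. A rough worked example; the fragment and WQEBB sizes are assumptions, not taken from the patch:

/* Assuming 4 KB fragments and 64-byte WQEBBs, i.e. 64 WQEBBs per fragment:
 *
 *   pi            = 62		(producer slot inside the current fragment)
 *   contig_wqebbs = 64 - 62 = 2	(room left before the fragment edge)
 *
 * 2 < MLX5_SEND_WQE_MAX_WQEBBS (16), so two NOPs are posted, pc advances
 * to the start of the next fragment, and the new session begins on a
 * contiguous run of WQEBBs.
 */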
@@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->pkt_count++;
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
(struct mlx5_wqe_inline_seg *)dseg;
u16 ds_len = sizeof(*inline_dseg) + dma_len;
u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
- if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
- /* Not enough space for inline wqe, send with memory pointer */
- session->complete = true;
- goto no_inline;
- }
-
inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
memcpy(inline_dseg->data, xdptxd->data, dma_len);
@@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
return;
}
-no_inline:
dseg->addr = cpu_to_be64(xdptxd->dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
session->ds_count++;
}
-static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_tx_wqe **wqe)
+static inline struct mlx5e_tx_wqe *
+mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(wqe, 0, sizeof(*wqe));
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
+ return wqe;
}
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 7f78c004d12f..d360750b25b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -60,24 +60,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
struct mlx5e_channel *c)
{
- struct mlx5e_channel_param cparam = {};
+ struct mlx5e_channel_param *cparam;
struct dim_cq_moder icocq_moder = {};
int err;
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL;
- mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!cparam)
+ return -ENOMEM;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
+ mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
if (unlikely(err))
- return err;
+ goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
@@ -87,21 +91,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
* is disabled and then reenabled, but the SQ continues receiving CQEs
* from the old UMEM.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
- err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
+ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
if (unlikely(err))
goto err_close_sq;
/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
* triggered and NAPI to be called on the correct CPU.
*/
- err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
if (unlikely(err))
goto err_close_icocq;
+ kvfree(cparam);
+
spin_lock_init(&c->xskicosq_lock);
set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
@@ -123,6 +129,9 @@ err_close_rq:
err_close_rx_cq:
mlx5e_close_cq(&c->xskrq.cq);
+err_free_cparam:
+ kvfree(cparam);
+
return err;
}
@@ -141,6 +150,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ mlx5e_activate_icosq(&c->xskicosq);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
@@ -153,6 +163,7 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
+ mlx5e_deactivate_icosq(&c->xskicosq);
}
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 35e188cf4ea4..fd2c75b4b519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -26,6 +26,13 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
+ * active and not polled by NAPI. Return 0, because the upcoming
+ * activate will trigger the IRQ for us.
+ */
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
+ return 0;
+
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 7833ddef0427..e5222d17df35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -408,7 +408,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
goto out;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1539cf3de5dc..f7890e0ce96c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -180,15 +180,3 @@ out:
return err;
}
-
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
-{
- u8 min_inline_mode;
-
- mlx5_query_min_inline(mdev, &min_inline_mode);
- if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- min_inline_mode = MLX5_INLINE_MODE_L2;
-
- return min_inline_mode;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8dd31b5c740c..01f2918063af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 20e628c907e5..c5a9c20d7f00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1431,7 +1431,7 @@ static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
return ret;
}
-static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode)
+static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
{
__u32 ret = 0;
@@ -1459,7 +1459,7 @@ static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode)
return ret;
}
-static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode)
+static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
{
u8 ret = 0;
@@ -1505,7 +1505,7 @@ static void mlx5e_get_wol(struct net_device *netdev,
if (err)
return;
- wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
+ wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
}
static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1521,7 +1521,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~wol_supported)
return -EINVAL;
- mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts);
+ mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
@@ -1958,21 +1958,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-#ifndef CONFIG_MLX5_EN_RXNFC
-/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
- * otherwise this function will be defined from en_fs_ethtool.c
- */
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- if (info->cmd != ETHTOOL_GRXRINGS)
- return -EOPNOTSUPP;
- /* ring_count is needed by ethtool -x */
- info->data = priv->channels.params.num_channels;
- return 0;
+ /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+ * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc
+ * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ */
+ if (info->cmd == ETHTOOL_GRXRINGS) {
+ info->data = priv->channels.params.num_channels;
+ return 0;
+ }
+
+ return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
-#endif
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
@@ -1993,9 +1999,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
-#ifdef CONFIG_MLX5_EN_RXNFC
.set_rxnfc = mlx5e_set_rxnfc,
-#endif
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 76cc10e44080..15b7f0f1427c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -747,8 +747,55 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_GRE,
},
+ [MLX5E_TT_IPV4_IPIP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5E_TT_IPV6_IPIP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5E_TT_IPV4_IPV6] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPV6,
+ },
+ [MLX5E_TT_IPV6_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPV6,
+ },
+
};
+bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
+{
+ switch (proto_type) {
+ case IPPROTO_GRE:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
+ default:
+ return false;
+ }
+}
+
+bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
+ if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
+ return true;
+ }
+ return false;
+}
+
+bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5e_any_tunnel_proto_supported(mdev) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
+}
+
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
if (ethertype == ETH_P_IP)
@@ -838,6 +885,9 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
+ if (!mlx5e_tunnel_proto_supported(priv->mdev,
+ ttc_tunnel_rules[tt].proto))
+ continue;
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 94304abc49e9..eed7101e8bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -888,10 +888,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -911,16 +911,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d5f6e56188f..dadadf221087 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -56,12 +56,13 @@
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
-#include "en/reporter.h"
+#include "en/health.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
+#include "en/hv_vhca_stats.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -247,26 +248,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- default:
- return mlx5_wq_cyc_get_size(&rq->wqe.wq);
- }
-}
-
-static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return rq->mpwqe.wq.cur_sz;
- default:
- return rq->wqe.wq.cur_sz;
- }
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
@@ -382,6 +363,13 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq)
kvfree(rq->wqe.di);
}
+static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
+
+ mlx5e_reporter_rq_cqe_err(rq);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -418,6 +406,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
+ INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -720,8 +709,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
return err;
}
-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
- int next_state)
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -829,10 +817,11 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
+ mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT;
}
-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
u16 wqe_ix;
@@ -911,7 +900,7 @@ err_free_rq:
return err;
}
-static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_trigger_irq(&rq->channel->icosq);
@@ -926,6 +915,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
+ cancel_work_sync(&rq->channel->icosq.recover_work);
+ cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1042,6 +1033,14 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
return 0;
}
+static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ mlx5e_reporter_icosq_cqe_err(sq);
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
struct mlx5e_icosq *sq)
@@ -1064,6 +1063,8 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
+ INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+
return 0;
err_sq_wq_destroy:
@@ -1130,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+ if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
@@ -1377,7 +1380,7 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
- mlx5e_tx_reporter_err_cqe(sq);
+ mlx5e_reporter_tx_err_cqe(sq);
}
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -1393,7 +1396,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1407,12 +1409,22 @@ err_free_icosq:
return err;
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
- struct mlx5e_channel *c = sq->channel;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+}
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_channel *c = icosq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
napi_synchronize(&c->napi);
+}
+
+void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq(sq);
@@ -1430,7 +1442,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+ csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1680,7 +1692,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * priv->max_nch;
- err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+ err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->sq, &c->sq[tc], tc);
if (err)
goto err_close_sqs;
@@ -1914,6 +1926,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_cq(&c->icosq.cq);
}
+static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
+{
+ u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
+
+ return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
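mlx5e_enumerate_lag_port() above spreads channels across the LAG ports round-robin, biased by the function's vhca_id so that PFs and VFs do not all start on the same port. A small worked sketch of the resulting assignment, assuming two LAG ports and an illustrative vhca_id of 5:

#include <linux/types.h>

/* Illustrative only: 2 LAG ports.
 *
 *   PF (bias 0):          ch 0 -> port 0, ch 1 -> port 1, ch 2 -> port 0, ...
 *   VF with vhca_id = 5:  ch 0 -> (0 + 5) % 2 = port 1, ch 1 -> port 0, ...
 */
static u8 sketch_lag_port(int ch_ix, u16 bias, u8 num_lag_ports)
{
	return (ch_ix + bias) % num_lag_ports;
}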
@@ -1948,6 +1967,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
c->irq_desc = irq_to_desc(irq);
+ c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
err = mlx5e_alloc_xps_cpumask(c, params);
if (err)
@@ -1989,6 +2009,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
+ mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_rq(&c->rq);
netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
@@ -2004,6 +2025,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_xsk(c);
mlx5e_deactivate_rq(&c->rq);
+ mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
}
@@ -2321,10 +2343,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- if (!IS_ERR_OR_NULL(priv->tx_reporter))
- devlink_health_reporter_state_update(priv->tx_reporter,
- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
-
+ mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
@@ -3168,40 +3187,58 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
mlx5_core_destroy_tis(mdev, tisn);
}
+void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc, i;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+}
+
+static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
+}
+
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
+ int tc, i;
int err;
- int tc;
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, prio, tc << 1);
+ MLX5_SET(tisc, tisc, prio, tc << 1);
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
- if (err)
- goto err_close_tises;
+ if (mlx5e_lag_should_assign_affinity(priv->mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
}
return 0;
err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+ tc = priv->profile->max_tc;
+ }
return err;
}
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
- int tc;
-
- mlx5e_tx_reporter_destroy(priv);
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
}
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
@@ -3422,7 +3459,7 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct flow_cls_offload *cls_flower,
- int flags)
+ unsigned long flags)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -3442,12 +3479,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_NIC_OFFLOAD);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
default:
return -EOPNOTSUPP;
}
@@ -3463,11 +3500,15 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
- case TC_SETUP_BLOCK:
+ case TC_SETUP_BLOCK: {
+ struct flow_block_offload *f = type_data;
+
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_block_cb_list,
mlx5e_setup_tc_block_cb,
priv, priv, true);
+ }
#endif
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
@@ -3640,7 +3681,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
+ if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3781,9 +3822,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- features &= ~NETIF_F_LRO;
- if (params->lro_en)
+ if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_LRO;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3950,7 +3992,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
@@ -4202,6 +4245,8 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
switch (proto) {
case IPPROTO_GRE:
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
return features;
case IPPROTO_UDP:
udph = udp_hdr(skb);
@@ -4267,7 +4312,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!netif_xmit_stopped(dev_queue))
continue;
- if (mlx5e_tx_reporter_timeout(sq))
+ if (mlx5e_reporter_tx_timeout(sq))
report_failed = true;
}
@@ -4768,7 +4813,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
@@ -4838,7 +4883,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
- MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ mlx5e_any_tunnel_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
@@ -4853,7 +4898,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
- if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
@@ -4862,6 +4907,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
+ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
+ netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ }
+
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
@@ -4965,12 +5019,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_netdev_cleanup(priv->netdev, priv);
@@ -5073,7 +5129,6 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
#endif
- mlx5e_tx_reporter_create(priv);
return 0;
}
@@ -5097,6 +5152,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5129,6 +5185,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index d0684fdb69e1..1623cd32f303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -46,6 +46,8 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#define CREATE_TRACE_POINTS
+#include "diag/en_rep_tracepoint.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -389,24 +391,17 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
- struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
u64 parent_id;
priv = netdev_priv(dev);
- esw = priv->mdev->priv.eswitch;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
ppid->id_len = sizeof(parent_id);
memcpy(ppid->id, &parent_id, sizeof(parent_id));
-
- return 0;
}
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
@@ -531,47 +526,97 @@ void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
neigh_update->min_interval);
}
+static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ return refcount_inc_not_zero(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
+
+static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt)) {
+ mlx5e_rep_neigh_entry_remove(nhe);
+ kfree_rcu(nhe, rcu);
+ }
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh_hash_entry *next = NULL;
+
+ rcu_read_lock();
+
+ for (next = nhe ?
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &nhe->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list) :
+ list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list);
+ next;
+ next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &next->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list))
+ if (mlx5e_rep_neigh_entry_hold(next))
+ break;
+
+ rcu_read_unlock();
+
+ if (nhe)
+ mlx5e_rep_neigh_entry_release(nhe);
+
+ return next;
+}
+
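mlx5e_get_next_nhe() above implements a "hold next, release previous" walk: each candidate is pinned with refcount_inc_not_zero() inside the RCU read section, and the entry returned by the previous call is dropped once its successor is found (or when the walk ends). A minimal sketch of how such an iterator is consumed, mirroring the stats work below; do_sleeping_work() is a hypothetical callback:

struct mlx5e_neigh_hash_entry *nhe = NULL;

/* Each iteration holds exactly one reference; the body may sleep because
 * no RCU read lock is held across it.
 */
while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
	do_sleeping_work(nhe);		/* hypothetical */
/* No reference is left held: the final call released the last entry
 * before returning NULL.
 */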
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe;
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
rtnl_lock();
if (!list_empty(&rpriv->neigh_update.neigh_list))
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
+ while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
mlx5e_tc_update_neigh_used_value(nhe);
rtnl_unlock();
}
-static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
-{
- refcount_inc(&nhe->refcnt);
-}
-
-static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
-{
- if (refcount_dec_and_test(&nhe->refcnt))
- kfree(nhe);
-}
-
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
bool neigh_connected,
unsigned char ha[ETH_ALEN])
{
struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool encap_connected;
+ LIST_HEAD(flow_list);
ASSERT_RTNL();
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&e->res_ready);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ if (e->compl_result || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
+
if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
(!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
- mlx5e_tc_encap_flows_del(priv, e);
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
ether_addr_copy(e->h_dest, ha);
@@ -581,8 +626,11 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
*/
ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
- mlx5e_tc_encap_flows_add(priv, e);
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
+unlock:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ mlx5e_put_encap_flow_list(priv, &flow_list);
}
static void mlx5e_rep_neigh_update(struct work_struct *work)
@@ -594,7 +642,6 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
unsigned char ha[ETH_ALEN];
struct mlx5e_priv *priv;
bool neigh_connected;
- bool encap_connected;
u8 nud_state, dead;
rtnl_lock();
@@ -612,13 +659,15 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_connected = (nud_state & NUD_VALID) && !dead;
+ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+
list_for_each_entry(e, &nhe->encap_list, encap_list) {
- encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- priv = netdev_priv(e->out_dev);
+ if (!mlx5e_encap_take(e))
+ continue;
- if (encap_connected != neigh_connected ||
- !ether_addr_equal(e->h_dest, ha))
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ priv = netdev_priv(e->out_dev);
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ mlx5e_encap_put(priv, e);
}
mlx5e_rep_neigh_entry_release(nhe);
rtnl_unlock();
@@ -659,8 +708,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
struct mlx5e_rep_indr_block_priv *indr_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
- int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
int err = 0;
switch (flower->command) {
@@ -714,6 +763,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ f->unlocked_driver_cb = true;
f->driver_block_list = &mlx5e_block_cb_list;
switch (f->command) {
@@ -722,10 +772,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (indr_priv)
return -EEXIST;
- if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
- indr_priv, &mlx5e_block_cb_list))
- return -EBUSY;
-
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
if (!indr_priv)
return -ENOMEM;
@@ -785,9 +831,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
{
int err;
- err = __tc_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ err = __flow_indr_block_cb_register(netdev, rpriv,
+ mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -800,8 +846,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
- __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -827,6 +873,28 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void
+mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe,
+ struct neighbour *n)
+{
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destructed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+
+ /* This assignment is valid as long as the neigh reference
+ * is taken
+ */
+ nhe->n = n;
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+}
+
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -859,34 +927,13 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
m_neigh.family = n->ops->family;
memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- /* We are in atomic context and can't take RTNL mutex, so use
- * spin_lock_bh to lookup the neigh table. bh is used since
- * netevent can be called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
+ rcu_read_lock();
nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
- if (!nhe) {
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
+ if (!nhe)
return NOTIFY_DONE;
- }
-
- /* This assignment is valid as long as the the neigh reference
- * is taken
- */
- nhe->n = n;
-
- /* Take a reference to ensure the neighbour and mlx5 encap
- * entry won't be destructed until we drop the reference in
- * delayed work.
- */
- neigh_hold(n);
- mlx5e_rep_neigh_entry_hold(nhe);
- if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
- mlx5e_rep_neigh_entry_release(nhe);
- neigh_release(n);
- }
- spin_unlock_bh(&neigh_update->encap_lock);
+ mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
break;
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -903,19 +950,15 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
#endif
return NOTIFY_DONE;
- /* We are in atomic context and can't take RTNL mutex,
- * so use spin_lock_bh to walk the neigh list and look for
- * the relevant device. bh is used since netevent can be
- * called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
- list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+ neigh_list) {
if (p->dev == nhe->m_neigh.dev) {
found = true;
break;
}
}
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
if (!found)
return NOTIFY_DONE;
@@ -946,7 +989,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
return err;
INIT_LIST_HEAD(&neigh_update->neigh_list);
- spin_lock_init(&neigh_update->encap_lock);
+ mutex_init(&neigh_update->encap_lock);
INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
mlx5e_rep_neigh_stats_work);
mlx5e_rep_neigh_update_init_interval(rpriv);
@@ -973,6 +1016,7 @@ static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+ mutex_destroy(&neigh_update->encap_lock);
rhashtable_destroy(&neigh_update->neigh_ht);
}
@@ -988,28 +1032,27 @@ static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
if (err)
return err;
- list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+ list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
return err;
}
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
- spin_lock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_lock(&rpriv->neigh_update.encap_lock);
- list_del(&nhe->neigh_list);
+ list_del_rcu(&nhe->neigh_list);
rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
&nhe->rhash_node,
mlx5e_neigh_ht_params);
- spin_unlock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
}
-/* This function must only be called under RTNL lock or under the
- * representor's encap_lock in case RTNL mutex can't be held.
+/* This function must only be called under the representor's encap_lock or
+ * inside rcu read lock section.
*/
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
@@ -1017,9 +1060,11 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_neigh_hash_entry *nhe;
- return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
- mlx5e_neigh_ht_params);
+ nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+ return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}
static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
@@ -1032,8 +1077,10 @@ static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
if (!*nhe)
return -ENOMEM;
+ (*nhe)->priv = priv;
memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ spin_lock_init(&(*nhe)->encap_list_lock);
INIT_LIST_HEAD(&(*nhe)->encap_list);
refcount_set(&(*nhe)->refcnt, 1);
@@ -1047,19 +1094,6 @@ out_free:
return err;
}
-static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- /* The neigh hash entry must be removed from the hash table regardless
- * of the reference count value, so it won't be found by the next
- * neigh notification call. The neigh hash entry reference count is
- * incremented only during creation and neigh notification calls and
- * protects from freeing the nhe struct.
- */
- mlx5e_rep_neigh_entry_remove(priv, nhe);
- mlx5e_rep_neigh_entry_release(nhe);
-}
-
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
@@ -1072,16 +1106,26 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
if (err)
return err;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
if (!nhe) {
err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
if (err) {
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
mlx5_tun_entropy_refcount_dec(tun_entropy,
e->reformat_type);
return err;
}
}
- list_add(&e->encap_list, &nhe->encap_list);
+
+ e->nhe = nhe;
+ spin_lock(&nhe->encap_list_lock);
+ list_add_rcu(&e->encap_list, &nhe->encap_list);
+ spin_unlock(&nhe->encap_list_lock);
+
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+
return 0;
}
@@ -1091,13 +1135,16 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
- struct mlx5e_neigh_hash_entry *nhe;
- list_del(&e->encap_list);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!e->nhe)
+ return;
- if (list_empty(&nhe->encap_list))
- mlx5e_rep_neigh_entry_destroy(priv, nhe);
+ spin_lock(&e->nhe->encap_list_lock);
+ list_del_rcu(&e->encap_list);
+ spin_unlock(&e->nhe->encap_list_lock);
+
+ mlx5e_rep_neigh_entry_release(e->nhe);
+ e->nhe = NULL;
mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
@@ -1160,15 +1207,34 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ switch (ma->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlx5e_tc_configure_matchall(priv, ma);
+ case TC_CLSMATCHALL_DESTROY:
+ return mlx5e_tc_delete_matchall(priv, ma);
+ case TC_CLSMATCHALL_STATS:
+ mlx5e_tc_stats_matchall(priv, ma);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_ESW_OFFLOAD);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+ case TC_SETUP_CLSMATCHALL:
+ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1180,9 +1246,11 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
switch (type) {
case TC_SETUP_BLOCK:
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_rep_block_cb_list,
mlx5e_rep_setup_tc_cb,
@@ -1553,7 +1621,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv;
- int tc, err;
+ int err;
err = mlx5e_create_tises(priv);
if (err) {
@@ -1564,6 +1632,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
+ mutex_init(&uplink_priv->unready_flows_lock);
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
@@ -1588,18 +1657,15 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
tc_esw_cleanup:
mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
return err;
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- int tc;
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
/* clean indirect TC block notifications */
@@ -1608,6 +1674,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
/* delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
}
@@ -1731,37 +1798,46 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev,
mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}
+static unsigned int
+vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
+{
+ return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
+}
+
static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5e_rep_priv *rpriv)
{
struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct netdev_phys_item_id ppid = {};
- int ret;
+ unsigned int dl_port_index = 0;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
- ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (ret)
- return ret;
+ mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK) {
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
PCI_FUNC(dev->pdev->devfn), false, 0,
&ppid.id[0], ppid.id_len);
- else if (rep->vport == MLX5_VPORT_PF)
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ } else if (rep->vport == MLX5_VPORT_PF) {
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn);
- else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
+ dl_port_index = rep->vport;
+ } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
+ rpriv->rep->vport)) {
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn,
rep->vport - 1);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ }
- return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
+ return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
static void unregister_devlink_port(struct mlx5_core_dev *dev,
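
As a worked example for the vport_to_devlink_port_index() helper above: assuming a device whose vhca_id capability reads 2 and a VF representor on vport 1, the registered devlink port index would be (2 << 16) | 1 = 0x20001 (131073). Per the hunk, the uplink and VF representors register this packed index, while the PF representor keeps the raw vport number.
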
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index c56e6ee4350c..a0ae5069d8c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -35,6 +35,7 @@
#include <net/ip_tunnels.h>
#include <linux/rhashtable.h>
+#include <linux/mutex.h>
#include "eswitch.h"
#include "en.h"
#include "lib/port_tun.h"
@@ -48,7 +49,7 @@ struct mlx5e_neigh_update_table {
*/
struct list_head neigh_list;
/* protect lookup/remove operations */
- spinlock_t encap_lock;
+ struct mutex encap_lock;
struct notifier_block netevent_nb;
struct delayed_work neigh_stats_work;
unsigned long min_interval; /* jiffies */
@@ -75,6 +76,8 @@ struct mlx5_rep_uplink_priv {
struct mlx5_tun_entropy tun_entropy;
+ /* protects unready_flows */
+ struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
};
@@ -86,6 +89,7 @@ struct mlx5e_rep_priv {
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+ struct rtnl_link_stats64 prev_vf_vport_stats;
struct devlink_port dl_port;
};
@@ -107,6 +111,7 @@ struct mlx5e_neigh {
struct mlx5e_neigh_hash_entry {
struct rhash_head rhash_node;
struct mlx5e_neigh m_neigh;
+ struct mlx5e_priv *priv;
/* Save the neigh hash entry in a list on the representor in
* addition to the hash table. In order to iterate easily over the
@@ -114,6 +119,8 @@ struct mlx5e_neigh_hash_entry {
*/
struct list_head neigh_list;
+ /* protects encap list */
+ spinlock_t encap_list_lock;
/* encap list sharing the same neigh */
struct list_head encap_list;
@@ -134,6 +141,8 @@ struct mlx5e_neigh_hash_entry {
* 'used' value and avoid neigh deleting by the kernel.
*/
unsigned long reported_lastuse;
+
+ struct rcu_head rcu;
};
enum {
@@ -142,6 +151,8 @@ enum {
};
struct mlx5e_encap_entry {
+ /* attached neigh hash entry */
+ struct mlx5e_neigh_hash_entry *nhe;
/* neigh hash entry list of encaps sharing the same neigh */
struct list_head encap_list;
struct mlx5e_neigh m_neigh;
@@ -161,6 +172,10 @@ struct mlx5e_encap_entry {
u8 flags;
char *encap_header;
int encap_size;
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
+ struct rcu_head rcu;
};
struct mlx5e_rep_sq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ac6e586d403d..2fd2760d0bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,6 +48,7 @@
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
+#include "en/health.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -615,6 +616,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
do {
@@ -859,13 +862,24 @@ tail_padding_csum(struct sk_buff *skb, int offset,
}
static void
-mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
- struct mlx5e_rq_stats *stats)
+mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
{
struct ipv6hdr *ip6;
struct iphdr *ip4;
int pkt_len;
+ /* Fixup vlan headers, if any */
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). This will add
+ * the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
+
+ /* Fixup tail padding, if any */
switch (proto) {
case htons(ETH_P_IP):
ip4 = (struct iphdr *)(skb->data + network_depth);
@@ -931,16 +945,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return; /* CQE csum covers all received bytes */
/* csum might need some fixups ...*/
- if (network_depth > ETH_HLEN)
- /* CQE csum is calculated from the IP header and does
- * not cover VLAN headers (if present). This will add
- * the checksum manually.
- */
- skb->csum = csum_partial(skb->data + ETH_HLEN,
- network_depth - ETH_HLEN,
- skb->csum);
-
- mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
+ mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
return;
}
@@ -1065,11 +1070,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetchw(va); /* xdp_frame data area */
prefetch(data);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
rcu_read_lock();
consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
rcu_read_unlock();
@@ -1097,11 +1097,6 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 byte_cnt = cqe_bcnt - headlen;
struct sk_buff *skb;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
/* XDP is not supported in this configuration, as incoming packets
* might spread among multiple pages.
*/
@@ -1135,6 +1130,15 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}
+static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
+ !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
+ queue_work(rq->channel->priv->wq, &rq->recover_work);
+}
+
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
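
Both the ICOSQ hunk earlier in this file and trigger_report() above use the same arm-once idiom: test_and_set_bit() guarantees the recovery work is queued a single time until the handler clears the RECOVERING bit again. A stripped-down sketch of that idiom, with illustrative names only:

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	enum { MY_STATE_RECOVERING };	/* one bit in q->state */

	struct my_queue {
		unsigned long state;
		struct work_struct recover_work;	/* handler clears MY_STATE_RECOVERING when done */
		struct workqueue_struct *wq;
	};

	static void my_queue_trigger_recovery(struct my_queue *q)
	{
		/* queue the recovery work at most once per error burst */
		if (!test_and_set_bit(MY_STATE_RECOVERING, &q->state))
			queue_work(q->wq, &q->recover_work);
	}
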
@@ -1147,6 +1151,12 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1188,6 +1198,11 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
@@ -1322,7 +1337,8 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi->consumed_strides += cstrides;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
@@ -1498,6 +1514,11 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1533,26 +1554,27 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) {
- /* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
+ goto wq_free_wqe;
+
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb))
+ goto wq_free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
+wq_free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
-wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 57f9f346d213..f1065e78086a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -107,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -200,6 +203,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+ s->rx_xdp_tx_nops += xdpsq_stats->nops;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -217,6 +221,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
+ s->rx_recover += rq_stats->recover;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -227,6 +232,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+ s->tx_xdp_nops += xdpsq_red_stats->nops;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -363,17 +369,27 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
-static const struct counter_desc vnic_env_stats_desc[] = {
+static const struct counter_desc vnic_env_stats_steer_desc[] = {
{ "rx_steer_missed_packets",
VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};
-#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc)
+static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
+ { "dev_internal_queue_oob",
+ VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
+};
+
+#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
+ ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
+#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
+ ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
- return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
- NUM_VNIC_ENV_COUNTERS : 0;
+ return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -381,12 +397,13 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
{
int i;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
- return idx;
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_steer_desc[i].format);
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_desc[i].format);
+ vnic_env_stats_dev_oob_desc[i].format);
return idx;
}
@@ -395,12 +412,13 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
{
int i;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
- return idx;
-
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_desc, i);
+ vnic_env_stats_steer_desc, i);
+
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_dev_oob_desc, i);
return idx;
}
@@ -1294,6 +1312,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1331,6 +1350,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1340,6 +1360,7 @@ static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 76ac111e14d0..c281e567711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -81,6 +81,7 @@ struct mlx5e_sw_stats {
u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_mpwqe;
u64 rx_xdp_tx_inlnw;
+ u64 rx_xdp_tx_nops;
u64 rx_xdp_tx_full;
u64 rx_xdp_tx_err;
u64 rx_xdp_tx_cqe;
@@ -97,6 +98,7 @@ struct mlx5e_sw_stats {
u64 tx_xdp_xmit;
u64 tx_xdp_mpwqe;
u64 tx_xdp_inlnw;
+ u64 tx_xdp_nops;
u64 tx_xdp_full;
u64 tx_xdp_err;
u64 tx_xdp_cqes;
@@ -114,6 +116,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 rx_congst_umr;
u64 rx_arfs_err;
+ u64 rx_recover;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -247,6 +250,7 @@ struct mlx5e_rq_stats {
u64 cache_waive;
u64 congst_umr;
u64 arfs_err;
+ u64 recover;
};
struct mlx5e_sq_stats {
@@ -288,6 +292,7 @@ struct mlx5e_xdpsq_stats {
u64 xmit;
u64 mpwqe;
u64 inlnw;
+ u64 nops;
u64 full;
u64 err;
/* dirtied @completion */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 00b2d4a86159..5581a8045ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -38,6 +38,8 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -54,6 +56,7 @@
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "diag/en_tc_tracepoint.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -65,19 +68,20 @@ struct mlx5_nic_flow_attr {
struct mlx5_fc *counter;
};
-#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
- MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
- MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
- MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD,
- MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD,
- MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE),
- MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
- MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
- MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
- MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5),
+ MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
+ MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
+ MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
+ MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -100,6 +104,7 @@ enum {
* container_of(helper item, containing struct type, helper field[index])
*/
struct encap_flow_item {
+ struct mlx5e_encap_entry *e; /* attached encap instance */
struct list_head list;
int index;
};
@@ -108,7 +113,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
- u16 flags;
+ unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
@@ -116,10 +121,17 @@ struct mlx5e_tc_flow {
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ int tmp_efi_index;
+ struct list_head tmp_list; /* temporary flow list used by neigh update */
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+ struct completion init_done;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -157,12 +169,20 @@ struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same hairpin */
struct list_head flows;
+ /* hpe's that were not fully initialized when dead peer update event
+ * function traversed them.
+ */
+ struct list_head dead_peer_wait_list;
u16 peer_vhca_id;
u8 prio;
struct mlx5e_hairpin *hp;
+ refcount_t refcnt;
+ struct completion res_ready;
};
struct mod_hdr_key {
@@ -174,16 +194,93 @@ struct mlx5e_mod_hdr_entry {
/* a node of a hash table which keeps all the mod_hdr entries */
struct hlist_node mod_hdr_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same mod_hdr entry */
struct list_head flows;
struct mod_hdr_key key;
u32 mod_hdr_id;
+
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+{
+ if (!flow || !refcount_inc_not_zero(&flow->refcnt))
+ return ERR_PTR(-EINVAL);
+ return flow;
+}
+
+static void mlx5e_flow_put(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ if (refcount_dec_and_test(&flow->refcnt)) {
+ mlx5e_tc_del_flow(priv, flow);
+ kfree_rcu(flow, rcu_head);
+ }
+}
+
+static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before setting bit. */
+ smp_mb__before_atomic();
+ set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+ clear_bit(flag, &flow->flags);
+}
+
+#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ bool ret = test_bit(flag, &flow->flags);
+
+ /* Read fields of flow structure only after checking flags. */
+ smp_mb__after_atomic();
+ return ret;
+}
+
+#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, ESWITCH);
+}
+
+static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, OFFLOADED);
+}
+
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
return jhash(key->actions,
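
The flag helpers above bracket plain set_bit()/test_bit() with explicit barriers so that a reader which observes a flag also observes every store made before the flag was set. A hypothetical writer/reader pairing using them; publish_rule() and observe_rule() are illustrative and not part of the patch:

	static void publish_rule(struct mlx5e_tc_flow *flow,
				 struct mlx5_flow_handle *rule)
	{
		flow->rule[0] = rule;		/* payload store first */
		flow_flag_set(flow, OFFLOADED);	/* smp_mb__before_atomic() orders it before the bit */
	}

	static struct mlx5_flow_handle *observe_rule(struct mlx5e_tc_flow *flow)
	{
		if (!mlx5e_is_offloaded_flow(flow))	/* test_bit() + smp_mb__after_atomic() */
			return NULL;
		return flow->rule[0];			/* safe: ordered after the flag read */
	}
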
@@ -199,15 +296,62 @@ static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
+static struct mod_hdr_tbl *
+get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+ &priv->fs.tc.mod_hdr;
+}
+
+static struct mlx5e_mod_hdr_entry *
+mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
+{
+ struct mlx5e_mod_hdr_entry *mh, *found = NULL;
+
+ hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, key)) {
+ refcount_inc(&mh->refcnt);
+ found = mh;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
+ struct mlx5e_mod_hdr_entry *mh,
+ int namespace)
+{
+ struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
+
+ if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
+ return;
+ hash_del(&mh->mod_hdr_hlist);
+ mutex_unlock(&tbl->lock);
+
+ WARN_ON(!list_empty(&mh->flows));
+ if (mh->compl_result > 0)
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+
+ kfree(mh);
+}
+
+static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ?
+ MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
+}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int num_actions, actions_size, namespace, err;
struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_tbl *tbl;
struct mod_hdr_key key;
- bool found = false;
u32 hash_key;
num_actions = parse_attr->num_mod_hdr_actions;
@@ -218,80 +362,82 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
hash_key = hash_mod_hdr_info(&key);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- namespace = MLX5_FLOW_NAMESPACE_FDB;
- hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- } else {
- namespace = MLX5_FLOW_NAMESPACE_KERNEL;
- hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- }
+ namespace = get_flow_name_space(flow);
+ tbl = get_mod_hdr_table(priv, namespace);
- if (found)
+ mutex_lock(&tbl->lock);
+ mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
+ if (mh) {
+ mutex_unlock(&tbl->lock);
+ wait_for_completion(&mh->res_ready);
+
+ if (mh->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto attach_header_err;
+ }
goto attach_flow;
+ }
mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
- if (!mh)
+ if (!mh) {
+ mutex_unlock(&tbl->lock);
return -ENOMEM;
+ }
mh->key.actions = (void *)mh + sizeof(*mh);
memcpy(mh->key.actions, key.actions, actions_size);
mh->key.num_actions = num_actions;
+ spin_lock_init(&mh->flows_lock);
INIT_LIST_HEAD(&mh->flows);
+ refcount_set(&mh->refcnt, 1);
+ init_completion(&mh->res_ready);
+
+ hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
+ mutex_unlock(&tbl->lock);
err = mlx5_modify_header_alloc(priv->mdev, namespace,
mh->key.num_actions,
mh->key.actions,
&mh->mod_hdr_id);
- if (err)
- goto out_err;
-
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
- else
- hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ if (err) {
+ mh->compl_result = err;
+ goto alloc_header_err;
+ }
+ mh->compl_result = 1;
+ complete_all(&mh->res_ready);
attach_flow:
+ flow->mh = mh;
+ spin_lock(&mh->flows_lock);
list_add(&flow->mod_hdr, &mh->flows);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ spin_unlock(&mh->flows_lock);
+ if (mlx5e_is_eswitch_flow(flow))
flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
else
flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
return 0;
-out_err:
- kfree(mh);
+alloc_header_err:
+ complete_all(&mh->res_ready);
+attach_header_err:
+ mlx5e_mod_hdr_put(priv, mh, namespace);
return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->mod_hdr.next;
+ /* flow wasn't fully initialized */
+ if (!flow->mh)
+ return;
+ spin_lock(&flow->mh->flows_lock);
list_del(&flow->mod_hdr);
+ spin_unlock(&flow->mh->flows_lock);
- if (list_empty(next)) {
- struct mlx5e_mod_hdr_entry *mh;
-
- mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
-
- mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
- hash_del(&mh->mod_hdr_hlist);
- kfree(mh);
- }
+ mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+ flow->mh = NULL;
}
static
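
The reworked mlx5e_attach_mod_hdr() above, and later in the patch the hairpin and encap attach paths, follow the same get-or-create discipline: the entry is hashed before the expensive, sleeping HW setup so that concurrent requesters find it, take a reference and wait on res_ready instead of allocating duplicates. A condensed sketch of that discipline with hypothetical names; the real code additionally stores the entry pointer on the flow and links the flow onto the entry's list:

	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <linux/refcount.h>
	#include <linux/completion.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct cached_obj {
		struct list_head node;
		u32 key;
		refcount_t refcnt;
		struct completion res_ready;
		int compl_result;	/* > 0: HW setup done, < 0: HW setup failed */
	};

	/* Always returns a referenced entry (or ERR_PTR(-ENOMEM)); the caller
	 * checks compl_result and drops its reference on failure, much as the
	 * patch does via its *_put() helpers.
	 */
	static struct cached_obj *obj_get_or_create(struct mutex *lock,
						    struct list_head *cache, u32 key,
						    int (*hw_setup)(struct cached_obj *))
	{
		struct cached_obj *obj;

		mutex_lock(lock);
		list_for_each_entry(obj, cache, node) {
			if (obj->key == key && refcount_inc_not_zero(&obj->refcnt)) {
				mutex_unlock(lock);
				/* the creator may still be inside hw_setup() */
				wait_for_completion(&obj->res_ready);
				return obj;
			}
		}

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			mutex_unlock(lock);
			return ERR_PTR(-ENOMEM);
		}
		obj->key = key;
		refcount_set(&obj->refcnt, 1);
		init_completion(&obj->res_ready);
		list_add(&obj->node, cache);	/* concurrent requesters can find it now */
		mutex_unlock(lock);

		obj->compl_result = hw_setup(obj) ?: 1;	/* slow, may sleep */
		complete_all(&obj->res_ready);		/* wake any waiters, success or not */
		return obj;
	}
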
@@ -555,13 +701,35 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
- if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
+ refcount_inc(&hpe->refcnt);
return hpe;
+ }
}
return NULL;
}
+static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
+ struct mlx5e_hairpin_entry *hpe)
+{
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ return;
+ hash_del(&hpe->hairpin_hlist);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ if (!IS_ERR_OR_NULL(hpe->hp)) {
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ dev_name(hpe->hp->pair->peer_mdev->device));
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ }
+
+ WARN_ON(!list_empty(&hpe->flows));
+ kfree(hpe);
+}
+
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
@@ -627,17 +795,37 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
extack);
if (err)
return err;
+
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
- if (hpe)
+ if (hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ wait_for_completion(&hpe->res_ready);
+
+ if (IS_ERR(hpe->hp)) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
- if (!hpe)
+ if (!hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
return -ENOMEM;
+ }
+ spin_lock_init(&hpe->flows_lock);
INIT_LIST_HEAD(&hpe->flows);
+ INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
hpe->peer_vhca_id = peer_id;
hpe->prio = match_prio;
+ refcount_set(&hpe->refcnt, 1);
+ init_completion(&hpe->res_ready);
+
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
params.log_data_size = 15;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -659,9 +847,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
params.num_channels = link_speed64;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ hpe->hp = hp;
+ complete_all(&hpe->res_ready);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
- goto create_hairpin_err;
+ goto out_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
@@ -669,46 +859,39 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
- hpe->hp = hp;
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
- hash_hairpin_info(peer_id, match_prio));
-
attach_flow:
if (hpe->hp->num_channels > 1) {
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow_flag_set(flow, HAIRPIN_RSS);
flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
+
+ flow->hpe = hpe;
+ spin_lock(&hpe->flows_lock);
list_add(&flow->hairpin, &hpe->flows);
+ spin_unlock(&hpe->flows_lock);
return 0;
-create_hairpin_err:
- kfree(hpe);
+out_err:
+ mlx5e_hairpin_put(priv, hpe);
return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->hairpin.next;
+ /* flow wasn't fully initialized */
+ if (!flow->hpe)
+ return;
+ spin_lock(&flow->hpe->flows_lock);
list_del(&flow->hairpin);
+ spin_unlock(&flow->hpe->flows_lock);
- /* no more hairpin flows for us, release the hairpin pair */
- if (list_empty(next)) {
- struct mlx5e_hairpin_entry *hpe;
-
- hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
-
- netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- dev_name(hpe->hp->pair->peer_mdev->device));
-
- mlx5e_hairpin_destroy(hpe->hp);
- hash_del(&hpe->hairpin_hlist);
- kfree(hpe);
- }
+ mlx5e_hairpin_put(priv, flow->hpe);
+ flow->hpe = NULL;
}
static int
@@ -727,18 +910,17 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
- bool table_created = false;
int err, dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = attr->flow_tag;
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err) {
- goto err_add_hairpin_flow;
- }
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, HAIRPIN_RSS)) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = attr->hairpin_ft;
} else {
@@ -754,10 +936,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_fc_create;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter_id = mlx5_fc_id(counter);
dest_ix++;
@@ -769,9 +950,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_create_mod_hdr_id;
+ return err;
}
+ mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
int tc_grp_size, tc_tbl_size;
u32 max_flow_counter;
@@ -791,15 +973,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
"Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- err = PTR_ERR(priv->fs.tc.t);
- goto err_create_ft;
+ return PTR_ERR(priv->fs.tc.t);
}
-
- table_created = true;
}
if (attr->match_level != MLX5_MATCH_NONE)
@@ -807,29 +987,12 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
+ mutex_unlock(&priv->fs.tc.t_lock);
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+ if (IS_ERR(flow->rule[0]))
+ return PTR_ERR(flow->rule[0]);
return 0;
-
-err_add_rule:
- if (table_created) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
- priv->fs.tc.t = NULL;
- }
-err_create_ft:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_create_mod_hdr_id:
- mlx5_fc_destroy(dev, counter);
-err_fc_create:
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
- mlx5e_hairpin_flow_del(priv, flow);
-err_add_hairpin_flow:
- return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -839,18 +1002,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
counter = attr->counter;
- mlx5_del_flow_rules(flow->rule[0]);
+ if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
+ mutex_lock(&priv->fs.tc.t_lock);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+ mutex_unlock(&priv->fs.tc.t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
}
@@ -885,7 +1051,6 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
}
}
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
return rule;
}
@@ -894,7 +1059,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_esw_flow_attr *attr)
{
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_clear(flow, OFFLOADED);
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -917,7 +1082,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
- flow->flags |= MLX5E_TC_FLOW_SLOW;
+ flow_flag_set(flow, SLOW);
return rule;
}
@@ -932,7 +1097,26 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
slow_attr->split_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+ flow_flag_clear(flow, SLOW);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_add(struct mlx5e_tc_flow *flow,
+ struct list_head *unready_flows)
+{
+ flow_flag_set(flow, NOT_READY);
+ list_add_tail(&flow->unready, unready_flows);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_del(struct mlx5e_tc_flow *flow)
+{
+ list_del(&flow->unready);
+ flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
@@ -945,14 +1129,24 @@ static void add_unready_flow(struct mlx5e_tc_flow *flow)
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
- flow->flags |= MLX5E_TC_FLOW_NOT_READY;
- list_add_tail(&flow->unready, &uplink_priv->unready_flows);
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_add(flow, &uplink_priv->unready_flows);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
- list_del(&flow->unready);
- flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+
+ esw = flow->priv->mdev->priv.eswitch;
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_del(flow);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
@@ -980,14 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
@@ -1002,7 +1194,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
- goto err_attach_encap;
+ return err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -1012,21 +1204,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
- goto err_add_vlan;
+ return err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_mod_hdr;
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_create_counter;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
attr->counter = counter;
}
@@ -1044,27 +1234,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+ if (IS_ERR(flow->rule[0]))
+ return PTR_ERR(flow->rule[0]);
+ else
+ flow_flag_set(flow, OFFLOADED);
return 0;
-
-err_add_rule:
- mlx5_fc_destroy(attr->counter_dev, counter);
-err_create_counter:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_mod_hdr:
- mlx5_eswitch_del_vlan_action(esw, attr);
-err_add_vlan:
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
- mlx5e_detach_encap(priv, flow, out_index);
-err_attach_encap:
-err_max_prio_chain:
- return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@ -1088,14 +1263,14 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
int out_index;
- if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
+ if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
return;
}
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ if (mlx5e_is_offloaded_flow(flow)) {
+ if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
@@ -1119,13 +1294,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
@@ -1142,16 +1317,17 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(efi, &e->flows, list) {
+ list_for_each_entry(flow, flow_list, tmp_list) {
bool all_flow_encaps_valid = true;
int i;
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
esw_attr = flow->esw_attr;
spec = &esw_attr->parse_attr->spec;
- esw_attr->dests[efi->index].encap_id = e->encap_id;
- esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
+ esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
* a valid neighbour.
@@ -1177,30 +1353,32 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
flow->rule[0] = rule;
+ /* was unset when slow path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- list_for_each_entry(efi, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1210,8 +1388,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
flow->rule[0] = rule;
+ /* was unset when fast path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
/* we know that the encap is valid */
@@ -1221,17 +1400,90 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
return flow->esw_attr->counter;
else
return flow->nic_attr->counter;
}
+/* Takes reference to all flows attached to encap and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
+{
+ struct encap_flow_item *efi;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ wait_for_completion(&flow->init_done);
+
+ flow->tmp_efi_index = efi->index;
+ list_add(&flow->tmp_list, flow_list);
+ }
+}
+
+/* Iterate over tmp_list of flows attached to flow_list head. */
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+{
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
+ mlx5e_flow_put(priv, flow);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_encap_entry *next = NULL;
+
+retry:
+ rcu_read_lock();
+
+ /* find encap with non-zero reference counter value */
+ for (next = e ?
+ list_next_or_null_rcu(&nhe->encap_list,
+ &e->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list) :
+ list_first_or_null_rcu(&nhe->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list);
+ next;
+ next = list_next_or_null_rcu(&nhe->encap_list,
+ &next->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list))
+ if (mlx5e_encap_take(next))
+ break;
+
+ rcu_read_unlock();
+
+ /* release starting encap */
+ if (e)
+ mlx5e_encap_put(netdev_priv(e->out_dev), e);
+ if (!next)
+ return next;
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&next->res_ready);
+ /* continue searching if encap entry is not in valid state after completion */
+ if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ e = next;
+ goto retry;
+ }
+
+ return next;
+}
+
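+
+The iterator above illustrates a recurring constraint in this series: wait_for_completion() may sleep, so it cannot run under rcu_read_lock(); each candidate encap is therefore pinned with mlx5e_encap_take() inside the RCU section and only waited on after the lock is dropped. A generic sketch of the pin-then-unlock step, with illustrative item names:

	#include <linux/rculist.h>
	#include <linux/refcount.h>

	struct item {
		struct list_head node;	/* on an RCU-protected list */
		refcount_t refcnt;
	};

	static struct item *first_live_item(struct list_head *head)
	{
		struct item *it, *found = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(it, head, node) {
			if (refcount_inc_not_zero(&it->refcnt)) {	/* skip entries already being freed */
				found = it;
				break;
			}
		}
		rcu_read_unlock();

		return found;	/* caller may now sleep and must eventually drop the reference */
	}
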
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ struct mlx5e_encap_entry *e = NULL;
struct mlx5e_tc_flow *flow;
- struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
@@ -1247,14 +1499,25 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
else
return;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- struct encap_flow_item *efi;
- if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
- continue;
- list_for_each_entry(efi, &e->flows, list) {
+ /* mlx5e_get_next_valid_encap() releases previous encap before returning
+ * next one.
+ */
+ while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+ struct mlx5e_priv *priv = netdev_priv(e->out_dev);
+ struct encap_flow_item *efi, *tmp;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_for_each_entry_safe(efi, tmp, &e->flows, list) {
flow = container_of(efi, struct mlx5e_tc_flow,
encaps[efi->index]);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ list_add(&flow->tmp_list, &flow_list);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
@@ -1263,10 +1526,18 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
}
- if (neigh_used)
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_put_encap_flow_list(priv, &flow_list);
+ if (neigh_used) {
+ /* release current encap before breaking the loop */
+ mlx5e_encap_put(priv, e);
break;
+ }
}
+ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
+
if (neigh_used) {
nhe->reported_lastuse = jiffies;
@@ -1282,40 +1553,69 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
- struct list_head *next = flow->encaps[out_index].list.next;
+ WARN_ON(!list_empty(&e->flows));
- list_del(&flow->encaps[out_index].list);
- if (list_empty(next)) {
- struct mlx5e_encap_entry *e;
-
- e = list_entry(next, struct mlx5e_encap_entry, flows);
+ if (e->compl_result > 0) {
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ }
- hash_del_rcu(&e->encap_hlist);
- kfree(e->encap_header);
- kfree(e);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+}
+
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index)
+{
+ struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ /* flow wasn't fully initialized */
+ if (!e)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->encaps[out_index].list);
+ flow->encaps[out_index].e = NULL;
+ if (!refcount_dec_and_test(&e->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
}
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
- !(flow->flags & MLX5E_TC_FLOW_DUP))
+ if (!flow_flag_test(flow, ESWITCH) ||
+ !flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer);
mutex_unlock(&esw->offloads.peer_mutex);
- flow->flags &= ~MLX5E_TC_FLOW_DUP;
+ flow_flag_clear(flow, DUP);
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
kvfree(flow->peer_flow);
@@ -1339,7 +1639,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (mlx5e_is_eswitch_flow(flow)) {
mlx5e_tc_del_fdb_peer_flow(flow);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
@@ -1840,6 +2140,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
+ bool is_eswitch_flow;
int err;
inner_match_level = MLX5_MATCH_NONE;
@@ -1850,7 +2151,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
+ if (!err && is_eswitch_flow) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
@@ -1864,7 +2166,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (is_eswitch_flow) {
flow->esw_attr->inner_match_level = inner_match_level;
flow->esw_attr->outer_match_level = outer_match_level;
} else {
@@ -2385,14 +2687,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
{
u32 actions;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
actions = flow->esw_attr->action;
else
actions = flow->nic_attr->action;
- if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ if (flow_flag_test(flow, EGRESS) &&
!((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
+ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2542,7 +2845,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ flow_flag_set(flow, HAIRPIN);
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
@@ -2629,6 +2932,31 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e;
+ struct encap_key e_key;
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
+ if (!cmp_encap_info(&e_key, key) &&
+ mlx5e_encap_take(e))
+ return e;
+ }
+
+ return NULL;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -2641,11 +2969,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
const struct ip_tunnel_info *tun_info;
- struct encap_key key, e_key;
+ struct encap_key key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
- bool found = false;
int err = 0;
parse_attr = attr->parse_attr;
@@ -2660,42 +2987,60 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
hash_key = hash_encap_info(&key);
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
- encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info->key;
- e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, &key)) {
- found = true;
- break;
- }
- }
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */
- if (found)
+ if (e) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ wait_for_completion(&e->res_ready);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (e->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
+ if (!e) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err)
+ if (err) {
+ kfree(e);
+ e = NULL;
goto out_err;
+ }
INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET)
err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- if (err)
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ complete_all(&e->res_ready);
+ if (err) {
+ e->compl_result = err;
goto out_err;
-
- hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ }
+ e->compl_result = 1;
attach_flow:
+ flow->encaps[out_index].e = e;
list_add(&flow->encaps[out_index].list, &e->flows);
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
@@ -2706,11 +3051,14 @@ attach_flow:
} else {
*encap_valid = false;
}
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err:
- kfree(e);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ if (e)
+ mlx5e_encap_put(priv, e);
return err;
}
@@ -2890,12 +3238,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ struct net_device *uplink_upper;
+ rcu_read_lock();
+ uplink_upper =
+ netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev)
out_dev = uplink_dev;
+ rcu_read_unlock();
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
@@ -3066,19 +3418,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return 0;
}
-static void get_flags(int flags, u16 *flow_flags)
+static void get_flags(int flags, unsigned long *flow_flags)
{
- u16 __flow_flags = 0;
+ unsigned long __flow_flags = 0;
- if (flags & MLX5E_TC_INGRESS)
- __flow_flags |= MLX5E_TC_FLOW_INGRESS;
- if (flags & MLX5E_TC_EGRESS)
- __flow_flags |= MLX5E_TC_FLOW_EGRESS;
+ if (flags & MLX5_TC_FLAG(INGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
+ if (flags & MLX5_TC_FLAG(EGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
- if (flags & MLX5E_TC_ESW_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
- if (flags & MLX5E_TC_NIC_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_NIC;
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
+ if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
*flow_flags = __flow_flags;
}
@@ -3090,12 +3442,13 @@ static const struct rhashtable_params tc_ht_params = {
.automatic_shrinking = true,
};
-static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
+ unsigned long flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
- if (flags & MLX5E_TC_ESW_OFFLOAD) {
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
return &uplink_rpriv->uplink_priv.tc_ht;
} else /* NIC offload */
@@ -3106,7 +3459,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
- flow->flags & MLX5E_TC_FLOW_INGRESS;
+ flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
@@ -3125,13 +3478,13 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
- struct flow_cls_offload *f, u16 flow_flags,
+ struct flow_cls_offload *f, unsigned long flow_flags,
struct mlx5e_tc_flow_parse_attr **__parse_attr,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
- int err;
+ int out_index, err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -3143,6 +3496,12 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->flags = flow_flags;
flow->priv = priv;
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ INIT_LIST_HEAD(&flow->encaps[out_index].list);
+ INIT_LIST_HEAD(&flow->mod_hdr);
+ INIT_LIST_HEAD(&flow->hairpin);
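+ /* Start with a single reference held by the caller; the flow is freed
+ * through mlx5e_flow_put() once the last reference is dropped.
+ */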
+ refcount_set(&flow->refcnt, 1);
+ init_completion(&flow->init_done);
*__flow = flow;
*__parse_attr = parse_attr;
@@ -3182,7 +3541,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
@@ -3193,7 +3552,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
attr_size = sizeof(struct mlx5_esw_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3215,6 +3574,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
+ complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
goto err_free;
@@ -3225,15 +3585,14 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
- kfree(flow);
- kvfree(parse_attr);
+ mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
- u16 flow_flags)
+ unsigned long flow_flags)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
@@ -3271,7 +3630,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
}
flow->peer_flow = peer_flow;
- flow->flags |= MLX5E_TC_FLOW_DUP;
+ flow_flag_set(flow, DUP);
mutex_lock(&esw->offloads.peer_mutex);
list_add_tail(&flow->peer, &esw->offloads.peer_flows);
mutex_unlock(&esw->offloads.peer_mutex);
@@ -3284,7 +3643,7 @@ out:
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3318,7 +3677,7 @@ out:
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3332,7 +3691,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
return -EOPNOTSUPP;
- flow_flags |= MLX5E_TC_FLOW_NIC;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3353,14 +3712,14 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_set(flow, OFFLOADED);
kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
kvfree(parse_attr);
out:
return err;
@@ -3369,12 +3728,12 @@ out:
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- int flags,
+ unsigned long flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u16 flow_flags;
+ unsigned long flow_flags;
int err;
get_flags(flags, &flow_flags);
@@ -3393,14 +3752,16 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_lock();
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_unlock();
if (flow) {
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
@@ -3411,55 +3772,68 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto out;
}
+ trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+ err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
return 0;
err_free:
- mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
out:
return err;
}
-#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
-#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
-
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
- if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
- return true;
+ bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
+ bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
- return false;
+ return flow_flag_test(flow, INGRESS) == dir_ingress &&
+ flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
+ int err;
+ rcu_read_lock();
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ if (!flow || !same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ /* Only delete the flow if its DELETED flag is not already set; the
+ * test-and-set guards against a concurrent delete of the same flow.
+ */
+ if (flow_flag_test_and_set(flow, DELETED)) {
+ err = -EINVAL;
+ goto errout;
+ }
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
+ rcu_read_unlock();
- mlx5e_tc_del_flow(priv, flow);
-
- kfree(flow);
+ trace_mlx5e_delete_flower(f);
+ mlx5e_flow_put(priv, flow);
return 0;
+
+errout:
+ rcu_read_unlock();
+ return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3469,15 +3843,24 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 lastuse = 0;
u64 packets = 0;
u64 bytes = 0;
+ int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ rcu_read_lock();
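+ /* Take a reference under RCU so the flow can't be released while its
+ * counters are being queried; dropped via mlx5e_flow_put() below.
+ */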
+ flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
+ tc_ht_params));
+ rcu_read_unlock();
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (!same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
- return 0;
+ goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
}
@@ -3489,8 +3872,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (!peer_esw)
goto out;
- if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
- (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
+ if (flow_flag_test(flow, DUP) &&
+ flow_flag_test(flow->peer_flow, OFFLOADED)) {
u64 bytes2;
u64 packets2;
u64 lastuse2;
@@ -3509,15 +3892,118 @@ no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
flow_stats_update(&f->stats, bytes, packets, lastuse);
+ trace_mlx5e_stats_flower(f);
+errout:
+ mlx5e_flow_put(priv, flow);
+ return err;
+}
+
+static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+ u32 rate_mbps;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+ /* rate is given in bytes/sec.
+ * First convert to bits/sec and then round to the nearest Mbit/sec
+ * (1 Mbit = 10^6 bits). If rate is non-zero, configure a minimum of
+ * 1 Mbit/sec; for example, 250000 bytes/sec becomes 2 Mbit/sec.
+ */
+ rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ vport_num = rpriv->rep->vport;
+
+ err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
+
+ return err;
+}
+
+static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ const struct flow_action_entry *act;
+ int err;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
+ if (err)
+ return err;
+
+ rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+ int prio = TC_H_MAJ(ma->common.prio) >> 16;
+
+ if (prio != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
+ return -EINVAL;
+ }
+
+ return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
+}
+
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+
+ return apply_police_params(priv, 0, extack);
+}
+
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct rtnl_link_stats64 cur_stats;
+ u64 dbytes;
+ u64 dpkts;
+
+ cur_stats = priv->stats.vf_vport;
+ dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ rpriv->prev_vf_vport_stats = cur_stats;
+ flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+}
+
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
- struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin_entry *hpe, *tmp;
+ LIST_HEAD(init_wait_list);
u16 peer_vhca_id;
int bkt;
@@ -3526,9 +4012,18 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
- if (hpe->peer_vhca_id == peer_vhca_id)
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
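+ /* Take a reference on each hairpin entry under the lock, then wait for
+ * its initialization to complete outside of it.
+ */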
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ if (refcount_inc_not_zero(&hpe->refcnt))
+ list_add(&hpe->dead_peer_wait_list, &init_wait_list);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
+ wait_for_completion(&hpe->res_ready);
+ if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
hpe->hp->pair->peer_gone = true;
+
+ mlx5e_hairpin_put(priv, hpe);
}
}
@@ -3564,7 +4059,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err;
- hash_init(tc->mod_hdr_tbl);
+ mutex_init(&tc->t_lock);
+ mutex_init(&tc->mod_hdr.lock);
+ hash_init(tc->mod_hdr.hlist);
+ mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
err = rhashtable_init(&tc->ht, &tc_ht_params);
@@ -3596,12 +4094,16 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier(&tc->netdevice_nb);
+ mutex_destroy(&tc->mod_hdr.lock);
+ mutex_destroy(&tc->hairpin_tbl_lock);
+
rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
+ mutex_destroy(&tc->t_lock);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
@@ -3614,7 +4116,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3636,10 +4138,10 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
reoffload_flows_work);
struct mlx5e_tc_flow *flow, *tmp;
- rtnl_lock();
+ mutex_lock(&rpriv->unready_flows_lock);
list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
- remove_unready_flow(flow);
+ unready_flow_del(flow);
}
- rtnl_unlock();
+ mutex_unlock(&rpriv->unready_flows_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3ab39275ca7d..924c6ef86a14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -40,13 +40,15 @@
#ifdef CONFIG_MLX5_ESWITCH
enum {
- MLX5E_TC_INGRESS = BIT(0),
- MLX5E_TC_EGRESS = BIT(1),
- MLX5E_TC_NIC_OFFLOAD = BIT(2),
- MLX5E_TC_ESW_OFFLOAD = BIT(3),
- MLX5E_TC_LAST_EXPORTED_BIT = 3,
+ MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
};
+#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
@@ -54,23 +56,37 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
+
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
+
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -80,7 +96,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
+ unsigned long flags)
+{
+ return 0;
+}
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 600e92cb629a..d3a67a9b4eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
- u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
- MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
opcode = MLX5_OPCODE_SEND;
mss = 0;
@@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+
opcode = MLX5_OPCODE_SEND;
mss = 0;
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ ihs = mlx5e_calc_min_inline(mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
stats->packets++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 41f25ea2e8d9..580c71cb9dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
*/
dma_rmb();
- if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
- atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
- else
- mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
@@ -328,10 +324,13 @@ err_buf:
/**
* mlx5_eq_enable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to enable
- * @nb - notifier call block
- * mlx5_eq_enable - must be called after EQ is created in device.
+ * @dev: Device which owns the eq
+ * @eq: EQ to enable
+ * @nb: Notifier call block
+ *
+ * Must be called after EQ is created in device.
+ *
+ * Return: 0 if no error
*/
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -348,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
EXPORT_SYMBOL(mlx5_eq_enable);
/**
- * mlx5_eq_disable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to disable
- * @nb - notifier call block
- * mlx5_eq_disable - must be called before EQ is destroyed.
+ * mlx5_eq_disable - Disable EQ for receiving EQEs
+ * @dev: Device which owns the eq
+ * @eq: EQ to disable
+ * @nb: Notifier call block
+ *
+ * Must be called before EQ is destroyed.
*/
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -415,7 +415,7 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table;
- int i, err;
+ int i;
eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
if (!eq_table)
@@ -423,9 +423,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
dev->priv.eq_table = eq_table;
- err = mlx5_eq_debugfs_init(dev);
- if (err)
- goto kvfree_eq_table;
+ mlx5_eq_debugfs_init(dev);
mutex_init(&eq_table->lock);
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
@@ -433,11 +431,6 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
eq_table->irq_table = dev->priv.irq_table;
return 0;
-
-kvfree_eq_table:
- kvfree(eq_table);
- dev->priv.eq_table = NULL;
- return err;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
@@ -945,9 +938,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +946,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1f3891fde2eb..30aae76b6a1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -58,20 +58,9 @@ struct vport_addr {
bool mc_promisc;
};
-enum {
- UC_ADDR_CHANGE = BIT(0),
- MC_ADDR_CHANGE = BIT(1),
- PROMISC_CHANGE = BIT(3),
-};
-
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE | \
- PROMISC_CHANGE)
-
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
- if (events_mask & UC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_uc_address_change, 1);
- if (events_mask & MC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
- if (events_mask & PROMISC_CHANGE)
+ if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
- if (vport->enabled_events & UC_ADDR_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE)
+ if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
- if (vport->enabled_events & PROMISC_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
return err;
}
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+ const struct mlx5_core_dev *dev = esw->dev;
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TASR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
+ __be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return 0;
+ return;
+
+ if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ return;
if (esw->qos.enabled)
- return -EEXIST;
+ return;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
- return err;
+ return;
}
esw->qos.enabled = true;
- return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1537,6 +1585,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw,
return 0;
}
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps)
+{
+ u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
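+ /* max_average_bw is expressed in Mbit/sec; a rate_mbps of 0 clears the
+ * vport rate limit.
+ */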
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+ return mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
+}
+
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
((u8 *)node_guid)[7] = mac[0];
@@ -1619,7 +1683,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- int enable_events)
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
@@ -1641,7 +1705,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
- vport->enabled_events = enable_events;
+ vport->enabled_events = enabled_events;
vport->enabled = true;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1834,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
+ * whichever of them are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int i;
+
+ /* Enable PF vport */
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_enable_vport(esw, vport, enabled_events);
+
+ /* Enable ECPF vports */
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_enable_vport(esw, vport, enabled_events);
+ }
+
+ /* Enable VF vports */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports
+ * that were previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
int err;
- int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1887,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+ esw_create_tsar(esw);
+
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
- err = esw_create_legacy_table(esw);
- if (err)
- goto abort;
+ err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw);
+ err = esw_offloads_enable(esw);
}
if (err)
goto abort;
- err = esw_create_tsar(esw);
- if (err)
- esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
- enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
- UC_ADDR_CHANGE;
-
- /* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
-
- /* Enable ECPF vports */
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
- }
-
- /* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
-
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1925,7 @@ abort:
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct esw_mc_addr *mc_promisc;
- struct mlx5_vport *vport;
int old_mode;
- int i;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
@@ -1859,21 +1934,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mc_promisc = &esw->mc_promisc;
mlx5_eswitch_event_handlers_unregister(esw);
- mlx5_esw_for_all_vports(esw, i, vport)
- esw_disable_vport(esw, vport);
-
- if (mc_promisc && mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_tsar(esw);
-
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_destroy_legacy_table(esw);
+ esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_cleanup(esw);
+ esw_offloads_disable(esw);
+
+ esw_destroy_tsar(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1931,8 +1999,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
- hash_init(esw->offloads.mod_hdr_tbl);
+ mutex_init(&esw->offloads.mod_hdr.lock);
+ hash_init(esw->offloads.mod_hdr.hlist);
+ atomic64_set(&esw->offloads.num_flows, 0);
mutex_init(&esw->state_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
@@ -1968,6 +2039,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
+ mutex_destroy(&esw->offloads.mod_hdr.lock);
+ mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
kfree(esw);
}
@@ -2085,23 +2158,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan > 4095 || qos > 7)
return -EINVAL;
- mutex_lock(&esw->state_lock);
-
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- goto unlock;
+ return err;
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto unlock;
+ return err;
err = esw_vport_egress_config(esw, evport);
}
-unlock:
- mutex_unlock(&esw->state_lock);
return err;
}
@@ -2109,11 +2178,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos)
{
u8 set_flags = 0;
+ int err;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
- return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_lock(&esw->state_lock);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_unlock(&esw->state_lock);
+
+ return err;
}
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 04685dbb280c..aba9e7a6ad3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
@@ -101,6 +102,13 @@ struct mlx5_vport_info {
bool trusted;
};
+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+ MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+ MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+ MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -122,7 +130,7 @@ struct mlx5_vport {
} qos;
bool enabled;
- u16 enabled_events;
+ enum mlx5_eswitch_vport_event enabled_events;
};
enum offloads_fdb_flags {
@@ -173,13 +181,14 @@ struct mlx5_esw_offload {
struct mlx5_eswitch_rep *vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
+ struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
- u64 num_flows;
+ atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
};
@@ -207,8 +216,11 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
+ /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+ struct esw_mc_addr mc_promisc;
+ /* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
u32 flags;
@@ -218,7 +230,6 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -233,8 +244,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -251,6 +262,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -513,6 +526,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+ for ((i) = (esw)->total_vports - 1; \
+ (vport) = &(esw)->vports[i], \
+ (i) >= MLX5_VPORT_PF; (i)--)
+
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
@@ -574,6 +592,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0323fd078271..7d3582ee66b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -229,7 +229,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto err_add_rule;
else
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
@@ -294,7 +294,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto add_err;
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
add_err:
@@ -322,7 +322,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
}
- esw->offloads.num_flows--;
+ atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
@@ -438,9 +438,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
!attr->dest_chain);
+ mutex_lock(&esw->state_lock);
+
err = esw_add_vlan_action_check(attr, push, pop, fwd);
if (err)
- return err;
+ goto unlock;
attr->vlan_handled = false;
@@ -453,11 +455,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
attr->vlan_handled = true;
}
- return 0;
+ goto unlock;
}
if (!push && !pop)
- return 0;
+ goto unlock;
if (!(offloads->vlan_push_pop_refcount)) {
/* it's the 1st vlan rule, apply global vlan pop policy */
@@ -482,6 +484,8 @@ skip_set_push:
out:
if (!err)
attr->vlan_handled = true;
+unlock:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -504,6 +508,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ mutex_lock(&esw->state_lock);
+
vport = esw_vlan_action_get_vport(attr, push, pop);
if (!push && !pop && fwd) {
@@ -511,7 +517,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
- return 0;
+ goto out;
}
if (push) {
@@ -529,12 +535,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
skip_unset_push:
offloads->vlan_push_pop_refcount--;
if (offloads->vlan_push_pop_refcount)
- return 0;
+ goto out;
/* no more vlan rules, stop global vlan pop policy */
err = esw_set_global_vlan_pop(esw, 0);
out:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -583,38 +590,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
- out, sizeof(out));
- if (err)
- return err;
-
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
-
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
- MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
- MLX5_SET(modify_esw_vport_context_in, in,
- field_select.fdb_to_vport_reg_c_id, 1);
-
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
- in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
- int err;
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
@@ -624,7 +608,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ if (enable)
+ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ else
+ fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -1402,10 +1389,9 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vports = esw->total_vports;
- struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
- u8 hw_id[ETH_ALEN], rep_type;
int vport_index;
+ u8 rep_type;
esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
@@ -1413,12 +1399,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
- mlx5_query_mac_address(dev, hw_id);
-
mlx5_esw_for_all_reps(esw, vport_index, rep) {
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
rep->vport_index = vport_index;
- ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_data[rep_type].state,
@@ -2120,7 +2103,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@@ -2134,11 +2117,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = mlx5_eswitch_enable_passing_vport_metadata(esw);
- if (err)
- goto err_vport_metadata;
- }
+ err = esw_set_passing_vport_metadata(esw, true);
+ if (err)
+ goto err_vport_metadata;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2152,8 +2135,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
@@ -2178,13 +2161,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2345,7 +2328,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
break;
}
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
@@ -2455,7 +2438,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (esw->offloads.encap == encap)
return 0;
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7ac1249eadc3..1e3381604b3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -182,7 +182,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
break;
@@ -262,7 +262,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
}
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id)
{
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
return err;
}
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+ return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
return 0;
}
-struct mlx5_cmd_fc_bulk {
- u32 id;
- int num;
- int outlen;
- u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
- struct mlx5_cmd_fc_bulk *b;
- int outlen =
- MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
- b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->id = id;
- b->num = num;
- b->outlen = outlen;
-
- return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
- kfree(b);
+ return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out)
{
+ int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
- MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
- return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes)
-{
- int index = id - b->id;
- void *stats;
-
- if (index < 0 || index >= b->num) {
- mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
- id, b->id, b->id + b->num - 1);
- return;
- }
-
- stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
- flow_statistics[index]);
- *packets = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index e340f9af2f5a..bc4606306009 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3e99799bdb40..7bdec442f0ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -60,7 +60,8 @@
ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+ .def_miss_action = def_miss_act,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
@@ -131,33 +132,41 @@ static struct init_tree_node {
int num_leaf_prios;
int prio;
int num_levels;
+ enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 7,
- .children = (struct init_tree_node[]) {
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
- BY_PASS_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, LAG_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
- LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
- ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
- ETHTOOL_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
- ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
- KERNEL_NIC_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
- ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+ .children = (struct init_tree_node[]){
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+ OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+ KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+ LEFTOVERS_NUM_LEVELS))),
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+ ANCHOR_NUM_LEVELS))),
}
};
@@ -167,8 +176,29 @@ static struct init_tree_node egress_root_fs = {
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
+#define RDMA_RX_BYPASS_PRIO 0
+#define RDMA_RX_KERNEL_PRIO 1
+static struct init_tree_node rdma_rx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 2,
+ .children = (struct init_tree_node[]) {
+ [RDMA_RX_BYPASS_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_KERNEL_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+ ADD_MULTIPLE_PRIO(1, 1))),
}
};
@@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
+ ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -2056,16 +2087,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
- case MLX5_FLOW_NAMESPACE_RDMA_RX:
- if (steering->rdma_rx_root_ns)
- return &steering->rdma_rx_root_ns->ns;
- return NULL;
default:
break;
}
if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
root_ns = steering->egress_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_BYPASS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_KERNEL_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2155,7 +2188,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
return ns;
}
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
+ int def_miss_act)
{
struct mlx5_flow_namespace *ns;
@@ -2164,6 +2198,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
+ ns->def_miss_action = def_miss_act;
tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -2230,7 +2265,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
base = &fs_prio->node;
} else if (init_node->type == FS_TYPE_NAMESPACE) {
fs_get_obj(fs_prio, fs_parent_node);
- fs_ns = fs_create_namespace(fs_prio);
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
if (IS_ERR(fs_ns))
return PTR_ERR(fs_ns);
base = &fs_ns->node;
@@ -2494,18 +2529,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ int err;
steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
if (!steering->rdma_rx_root_ns)
return -ENOMEM;
- steering->rdma_rx_root_ns->def_miss_action =
- MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+ err = init_root_tree(steering, &rdma_rx_root_fs,
+ &steering->rdma_rx_root_ns->ns.node);
+ if (err)
+ goto out_err;
- /* Create single prio */
- prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ set_prio_attrs(steering->rdma_rx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ steering->rdma_rx_root_ns = NULL;
+ return err;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
@@ -2543,7 +2585,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
}
for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio);
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
if (IS_ERR(ns)) {
err = PTR_ERR(ns);
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c1252d6be0ef..0d16b4b5ab83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -145,6 +145,7 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
u32 flags;
struct rhltable fgs_hash;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_ft_underlay_qp {
@@ -191,6 +192,7 @@ struct fs_prio {
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_node node;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_flow_group_mask {
@@ -219,7 +221,6 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
- enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 1834d9f3aa1c..ab69effb056d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,6 +40,8 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FC_POOL_USED_BUFF_RATIO 10
struct mlx5_fc_cache {
u64 packets;
@@ -58,12 +60,18 @@ struct mlx5_fc {
u64 lastpackets;
u64 lastbytes;
+ struct mlx5_fc_bulk *bulk;
u32 id;
bool aging;
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+
/* locking scheme:
*
* It is the responsibility of the user to prevent concurrent calls or bad
@@ -75,7 +83,7 @@ struct mlx5_fc {
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ * mlx5_fc_stats_work(). addlist is a lockless single linked list
* that doesn't require any additional synchronization when adding single
* node.
* - spawn thread to do the actual destroy
@@ -136,81 +144,87 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct mlx5_fc *counter = NULL;
- struct mlx5_cmd_fc_bulk *b;
- bool more = false;
- u32 afirst_id;
- int num;
- int err;
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+ struct mlx5_fc_cache *cache)
+{
+ void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+ flow_statistics[index]);
+ u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+ u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
- /* first id must be aligned to 4 when using bulk query */
- afirst_id = first->id & ~0x3;
+ if (cache->packets == packets)
+ return;
- /* number of counters to query inc. the last counter */
- num = ALIGN(last_id - afirst_id + 1, 4);
- if (num > max_bulk) {
- num = max_bulk;
- last_id = afirst_id + num - 1;
- }
+ cache->packets = packets;
+ cache->bytes = bytes;
+ cache->lastuse = jiffies;
+}
- b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
- if (!b) {
- mlx5_core_err(dev, "Error allocating resources for bulk query\n");
- return NULL;
- }
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u32 last_id)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ bool query_more_counters = (first->id <= last_id);
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ u32 *data = fc_stats->bulk_query_out;
+ struct mlx5_fc *counter = first;
+ u32 bulk_base_id;
+ int bulk_len;
+ int err;
- err = mlx5_cmd_fc_bulk_query(dev, b);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- goto out;
- }
+ while (query_more_counters) {
+ /* first id must be aligned to 4 when using bulk query */
+ bulk_base_id = counter->id & ~0x3;
- counter = first;
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- struct mlx5_fc_cache *c = &counter->cache;
- u64 packets;
- u64 bytes;
+ /* number of counters to query inc. the last counter */
+ bulk_len = min_t(int, max_bulk_len,
+ ALIGN(last_id - bulk_base_id + 1, 4));
- if (counter->id > last_id) {
- more = true;
- break;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+ data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
+ query_more_counters = false;
- mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &packets, &bytes);
+ list_for_each_entry_from(counter, &fc_stats->counters, list) {
+ int counter_index = counter->id - bulk_base_id;
+ struct mlx5_fc_cache *cache = &counter->cache;
- if (c->packets == packets)
- continue;
+ if (counter->id >= bulk_base_id + bulk_len) {
+ query_more_counters = true;
+ break;
+ }
- c->packets = packets;
- c->bytes = bytes;
- c->lastuse = jiffies;
+ update_counter_cache(counter_index, data, cache);
+ }
}
-
-out:
- mlx5_cmd_fc_bulk_free(b);
-
- return more ? counter : NULL;
}
-static void mlx5_free_fc(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
+static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (counter->bulk)
+ mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
+ else
+ mlx5_fc_free(dev, counter);
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -234,7 +248,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
if (time_before(now, fc_stats->next_query) ||
@@ -244,32 +258,62 @@ static void mlx5_fc_stats_work(struct work_struct *work)
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
- while (counter)
- counter = mlx5_fc_stats_query(dev, counter, last->id);
+ if (counter)
+ mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
int err;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&counter->list);
err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err)
- goto err_out;
+ if (err) {
+ kfree(counter);
+ return ERR_PTR(err);
+ }
+
+ return counter;
+}
+
+static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+
+ if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
+ counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
+ if (!IS_ERR(counter))
+ return counter;
+ }
+
+ return mlx5_fc_single_alloc(dev);
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int err;
+
+ if (IS_ERR(counter))
+ return counter;
+
+ INIT_LIST_HEAD(&counter->list);
+ counter->aging = aging;
if (aging) {
u32 id = counter->id;
counter->cache.lastuse = jiffies;
- counter->aging = true;
+ counter->lastbytes = counter->cache.bytes;
+ counter->lastpackets = counter->cache.packets;
idr_preload(GFP_KERNEL);
spin_lock(&fc_stats->counters_idr_lock);
@@ -290,10 +334,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
return counter;
err_out_alloc:
- mlx5_cmd_fc_free(dev, counter->id);
-err_out:
- kfree(counter);
-
+ mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);
@@ -317,13 +358,15 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
}
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len;
+ int max_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -331,14 +374,25 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
+ max_bulk_len = get_max_bulk_query_len(dev);
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ if (!fc_stats->bulk_query_out)
+ return -ENOMEM;
+
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
- return -ENOMEM;
+ goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+ mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
return 0;
+
+err_wq_create:
+ kfree(fc_stats->bulk_query_out);
+ return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,14 +406,16 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- idr_destroy(&fc_stats->counters_idr);
-
tmplist = llist_del_all(&fc_stats->addlist);
llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
+
+ mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
+ idr_destroy(&fc_stats->counters_idr);
+ kfree(fc_stats->bulk_query_out);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -406,3 +462,243 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
}
+
+/* Flow counter bulks */
+
+struct mlx5_fc_bulk {
+ struct list_head pool_list;
+ u32 base_id;
+ int bulk_len;
+ unsigned long *bitmask;
+ struct mlx5_fc fcs[0];
+};
+
+static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
+ u32 id)
+{
+ counter->bulk = bulk;
+ counter->id = id;
+}
+
+static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+{
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
+ struct mlx5_fc_bulk *bulk;
+ int err = -ENOMEM;
+ int bulk_len;
+ u32 base_id;
+ int i;
+
+ alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
+ bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
+
+ bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+ GFP_KERNEL);
+ if (!bulk)
+ goto err_alloc_bulk;
+
+ bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!bulk->bitmask)
+ goto err_alloc_bitmask;
+
+ err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
+ if (err)
+ goto err_mlx5_cmd_bulk_alloc;
+
+ bulk->base_id = base_id;
+ bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++) {
+ mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
+ set_bit(i, bulk->bitmask);
+ }
+
+ return bulk;
+
+err_mlx5_cmd_bulk_alloc:
+ kfree(bulk->bitmask);
+err_alloc_bitmask:
+ kfree(bulk);
+err_alloc_bulk:
+ return ERR_PTR(err);
+}
+
+static int
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+{
+ if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
+ return -EBUSY;
+ }
+
+ mlx5_cmd_fc_free(dev, bulk->base_id);
+ kfree(bulk->bitmask);
+ kfree(bulk);
+
+ return 0;
+}
+
+static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+{
+ int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
+
+ if (free_fc_index >= bulk->bulk_len)
+ return ERR_PTR(-ENOSPC);
+
+ clear_bit(free_fc_index, bulk->bitmask);
+ return &bulk->fcs[free_fc_index];
+}
+
+static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
+{
+ int fc_index = fc->id - bulk->base_id;
+
+ if (test_bit(fc_index, bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(fc_index, bulk->bitmask);
+ return 0;
+}
+
+/* Flow counters pool API */
+
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
+{
+ fc_pool->dev = dev;
+ mutex_init(&fc_pool->pool_lock);
+ INIT_LIST_HEAD(&fc_pool->fully_used);
+ INIT_LIST_HEAD(&fc_pool->partially_used);
+ INIT_LIST_HEAD(&fc_pool->unused);
+ fc_pool->available_fcs = 0;
+ fc_pool->used_fcs = 0;
+ fc_pool->threshold = 0;
+}
+
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+}
+
+static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+{
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+}
+
+static struct mlx5_fc_bulk *
+mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *new_bulk;
+
+ new_bulk = mlx5_fc_bulk_create(dev);
+ if (!IS_ERR(new_bulk))
+ fc_pool->available_fcs += new_bulk->bulk_len;
+ mlx5_fc_pool_update_threshold(fc_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+
+ fc_pool->available_fcs -= bulk->bulk_len;
+ mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fc_pool_update_threshold(fc_pool);
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk)
+{
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc *fc;
+
+ if (list_empty(src_list))
+ return ERR_PTR(-ENODATA);
+
+ bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
+ fc = mlx5_fc_bulk_acquire_fc(bulk);
+ if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
+ list_move(&bulk->pool_list, next_list);
+ return fc;
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fc *fc;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
+ &fc_pool->fully_used, false);
+ if (IS_ERR(fc))
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
+ &fc_pool->partially_used,
+ true);
+ if (IS_ERR(fc)) {
+ new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
+ if (IS_ERR(new_bulk)) {
+ fc = ERR_CAST(new_bulk);
+ goto out;
+ }
+ fc = mlx5_fc_bulk_acquire_fc(new_bulk);
+ list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ }
+ fc_pool->available_fcs--;
+ fc_pool->used_fcs++;
+
+out:
+ mutex_unlock(&fc_pool->pool_lock);
+ return fc;
+}
+
+static void
+mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk = fc->bulk;
+ int bulk_free_fcs_amount;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ if (mlx5_fc_bulk_release_fc(bulk, fc)) {
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
+ goto unlock;
+ }
+
+ fc_pool->available_fcs++;
+ fc_pool->used_fcs--;
+
+ bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
+ if (bulk_free_fcs_amount == 1)
+ list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
+ if (bulk_free_fcs_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fc_pool->available_fcs > fc_pool->threshold)
+ mlx5_fc_pool_free_bulk(fc_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fc_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fc_pool->pool_lock);
+}
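
The mlx5_fc_bulk allocator added above tracks which counters in a bulk are free with a bitmap: a set bit marks a free slot, acquiring a counter clears the first set bit, and releasing it sets the bit again. Below is a minimal userspace sketch of that scheme (not part of the patch); a single 64-bit word stands in for the real bitmap and all names are illustrative.

/*
 * Sketch of the bitmap slot allocator behind mlx5_fc_bulk_acquire_fc() and
 * mlx5_fc_bulk_release_fc(): bit i set means slot i is free.
 */
#include <stdio.h>

struct bulk_sketch {
	unsigned long long free_mask;	/* bit i set => slot i is free */
	int bulk_len;			/* at most 64 in this sketch */
};

static int bulk_acquire(struct bulk_sketch *b)
{
	int idx = __builtin_ffsll(b->free_mask);	/* 1-based, 0 if no bit set */

	if (!idx || idx > b->bulk_len)
		return -1;				/* like -ENOSPC in the patch */
	b->free_mask &= ~(1ULL << (idx - 1));
	return idx - 1;
}

static int bulk_release(struct bulk_sketch *b, int idx)
{
	if (b->free_mask & (1ULL << idx))
		return -1;				/* double release, like -EINVAL */
	b->free_mask |= 1ULL << idx;
	return 0;
}

int main(void)
{
	struct bulk_sketch b = { .free_mask = ~0ULL, .bulk_len = 64 };
	int first = bulk_acquire(&b);
	int second = bulk_acquire(&b);

	printf("acquired slots %d and %d\n", first, second);	/* 0 and 1 */
	bulk_release(&b, first);
	printf("slot reused: %d\n", bulk_acquire(&b));		/* 0 again */
	return 0;
}
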
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1a2560e3bf7c..3ed8ab2d703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -279,7 +279,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -296,7 +296,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index c5a491e22e55..96e64187c089 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]);
+ err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -228,7 +228,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+ mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_unint_underlay_qp:
@@ -257,7 +257,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea1d4d26ece0..3fc575d1c3ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,6 +2,7 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
+#include "lib/mlx5.h"
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 3dfab91ae5f2..4be4d2d36218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -87,7 +87,7 @@ void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
/* This function should only be called after mlx5_cmd_force_teardown_hca */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
new file mode 100644
index 000000000000..583dc7e2aca8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+
+static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset, bool read)
+{
+ int rc = -EOPNOTSUPP;
+ int bytes_returned;
+ int block_id;
+
+ if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX;
+
+ rc = read ?
+ hyperv_read_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id,
+ &bytes_returned) :
+ hyperv_write_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id);
+
+ /* Make sure len bytes were read successfully */
+ if (read && !rc && len != bytes_returned)
+ rc = -EIO;
+
+ if (rc) {
+ mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n",
+ read ? "read" : "write", rc, len,
+ offset);
+ return rc;
+ }
+
+ return 0;
+}
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, true);
+}
+
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, false);
+}
+
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ return hyperv_reg_block_invalidate(dev->pdev, context,
+ block_invalidate);
+}
+
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev)
+{
+ hyperv_reg_block_invalidate(dev->pdev, NULL, NULL);
+}
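
The config accessors above only move whole blocks: mlx5_hv_config_common() rejects any access that is not exactly one block-aligned chunk of HV_CONFIG_BLOCK_SIZE_MAX bytes and derives the block id from the offset. A tiny standalone sketch of that check follows (not part of the patch); the 128-byte block size is an assumption standing in for the value from linux/hyperv.h.

/*
 * Sketch of the block addressing check in mlx5_hv_config_common().
 * BLOCK_SIZE is assumed to be 128 here; the driver uses
 * HV_CONFIG_BLOCK_SIZE_MAX from linux/hyperv.h.
 */
#include <stdio.h>

#define BLOCK_SIZE 128

static int cfg_block_id(int offset, int len)
{
	if (offset % BLOCK_SIZE || len != BLOCK_SIZE)
		return -1;		/* like -EINVAL in the patch */
	return offset / BLOCK_SIZE;
}

int main(void)
{
	printf("%d %d %d\n",
	       cfg_block_id(0, BLOCK_SIZE),			/* 0 */
	       cfg_block_id(2 * BLOCK_SIZE, BLOCK_SIZE),	/* 2 */
	       cfg_block_id(5, BLOCK_SIZE));			/* -1 */
	return 0;
}
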
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
new file mode 100644
index 000000000000..f9a45573f459
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_H__
+#define __LIB_HV_H__
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+#include <linux/hyperv.h>
+#include <linux/mlx5/driver.h>
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev);
+#endif
+
+#endif /* __LIB_HV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
new file mode 100644
index 000000000000..4047629a876b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+#include "lib/hv_vhca.h"
+
+struct mlx5_hv_vhca {
+ struct mlx5_core_dev *dev;
+ struct workqueue_struct *work_queue;
+ struct mlx5_hv_vhca_agent *agents[MLX5_HV_VHCA_AGENT_MAX];
+ struct mutex agents_lock; /* Protect agents array */
+};
+
+struct mlx5_hv_vhca_work {
+ struct work_struct invalidate_work;
+ struct mlx5_hv_vhca *hv_vhca;
+ u64 block_mask;
+};
+
+struct mlx5_hv_vhca_data_block {
+ u16 sequence;
+ u16 offset;
+ u8 reserved[4];
+ u64 data[15];
+};
+
+struct mlx5_hv_vhca_agent {
+ enum mlx5_hv_vhca_agent_type type;
+ struct mlx5_hv_vhca *hv_vhca;
+ void *priv;
+ u16 seq;
+ void (*control)(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block);
+ void (*invalidate)(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask);
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent);
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_hv_vhca *hv_vhca = NULL;
+
+ hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
+ if (!hv_vhca)
+ return ERR_PTR(-ENOMEM);
+
+ hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca");
+ if (!hv_vhca->work_queue) {
+ kfree(hv_vhca);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ hv_vhca->dev = dev;
+ mutex_init(&hv_vhca->agents_lock);
+
+ return hv_vhca;
+}
+
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ destroy_workqueue(hv_vhca->work_queue);
+ kfree(hv_vhca);
+}
+
+static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
+{
+ struct mlx5_hv_vhca_work *hwork;
+ struct mlx5_hv_vhca *hv_vhca;
+ int i;
+
+ hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
+ hv_vhca = hwork->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->invalidate)
+ continue;
+
+ if (!(BIT(agent->type) & hwork->block_mask))
+ continue;
+
+ agent->invalidate(agent, hwork->block_mask);
+ }
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ kfree(hwork);
+}
+
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context;
+ struct mlx5_hv_vhca_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
+ work->hv_vhca = hv_vhca;
+ work->block_mask = block_mask;
+
+ queue_work(hv_vhca->work_queue, &work->invalidate_work);
+}
+
+#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */)
+
+static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->control)
+ continue;
+
+ if (!(AGENT_MASK(agent->type) & block->control))
+ continue;
+
+ agent->control(agent, block);
+ }
+}
+
+static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca,
+ u32 *capabilities)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (agent)
+ *capabilities |= AGENT_MASK(agent->type);
+ }
+}
+
+static void
+mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+ struct mlx5_core_dev *dev = hv_vhca->dev;
+ struct mlx5_hv_vhca_control_block *block;
+ u32 capabilities = 0;
+ int err;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return;
+
+ err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
+ if (err)
+ goto free_block;
+
+ mlx5_hv_vhca_capabilities(hv_vhca, &capabilities);
+
+	/* If there are no capabilities, send an empty block in return */
+ if (!capabilities) {
+ memset(block, 0, sizeof(*block));
+ goto write;
+ }
+
+ if (block->capabilities != capabilities)
+ block->capabilities = capabilities;
+
+ if (block->control & ~capabilities)
+ goto free_block;
+
+ mlx5_hv_vhca_agents_control(hv_vhca, block);
+ block->command_ack = block->command;
+
+write:
+ mlx5_hv_write_config(dev, block, sizeof(*block), 0);
+
+free_block:
+ kfree(block);
+}
+
+static struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca)
+{
+ return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL,
+ NULL,
+ mlx5_hv_vhca_control_agent_invalidate,
+ NULL, NULL);
+}
+
+static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ mlx5_hv_vhca_agent_destroy(agent);
+}
+
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int err;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return IS_ERR_OR_NULL(hv_vhca);
+
+ err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca,
+ mlx5_hv_vhca_invalidate);
+ if (err)
+ return err;
+
+ agent = mlx5_hv_vhca_control_agent_create(hv_vhca);
+ if (IS_ERR_OR_NULL(agent)) {
+ mlx5_hv_unregister_invalidate(hv_vhca->dev);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+ hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent;
+
+ return 0;
+}
+
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int i;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL];
+ if (agent)
+ mlx5_hv_vhca_control_agent_destroy(agent);
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++)
+ WARN_ON(hv_vhca->agents[i]);
+
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_unregister_invalidate(hv_vhca->dev);
+}
+
+static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca)
+{
+ mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL));
+}
+
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+			  void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *priv)
+{
+ struct mlx5_hv_vhca_agent *agent;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return ERR_PTR(-ENOMEM);
+
+ if (type >= MLX5_HV_VHCA_AGENT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&hv_vhca->agents_lock);
+ if (hv_vhca->agents[type]) {
+ mutex_unlock(&hv_vhca->agents_lock);
+ return ERR_PTR(-EINVAL);
+ }
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ agent = kzalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ agent->type = type;
+ agent->hv_vhca = hv_vhca;
+ agent->priv = priv;
+ agent->control = control;
+ agent->invalidate = invalidate;
+	agent->cleanup = cleanup;
+
+ mutex_lock(&hv_vhca->agents_lock);
+ hv_vhca->agents[type] = agent;
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+
+ return agent;
+}
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+
+ if (WARN_ON(agent != hv_vhca->agents[agent->type])) {
+ mutex_unlock(&hv_vhca->agents_lock);
+ return;
+ }
+
+ hv_vhca->agents[agent->type] = NULL;
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ if (agent->cleanup)
+ agent->cleanup(agent);
+
+ kfree(agent);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+}
+
+static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_data_block *data_block,
+ void *src, int len, int *offset)
+{
+ int bytes = min_t(int, (int)sizeof(data_block->data), len);
+
+ data_block->sequence = agent->seq;
+ data_block->offset = (*offset)++;
+ memcpy(data_block->data, src, bytes);
+
+ return bytes;
+}
+
+static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent)
+{
+ agent->seq++;
+}
+
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len)
+{
+ int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX;
+ int block_offset = 0;
+ int total = 0;
+ int err;
+
+ while (len) {
+ struct mlx5_hv_vhca_data_block data_block = {0};
+ int bytes;
+
+ bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block,
+ buf + total,
+ len, &block_offset);
+ if (!bytes)
+ return -ENOMEM;
+
+ err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block,
+ sizeof(data_block), offset);
+ if (err)
+ return err;
+
+ total += bytes;
+ len -= bytes;
+ }
+
+ mlx5_hv_vhca_agent_seq_update(agent);
+
+ return 0;
+}
+
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
+{
+ return agent->priv;
+}
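
mlx5_hv_vhca_agent_write() above streams a message to the hypervisor as a series of fixed-size data blocks: each block carries the agent's current sequence number and a running block offset, every block is written to the same per-agent config offset, and the sequence number is bumped once after the whole message has gone out. A standalone sketch of that loop follows (not part of the patch); write_block() is a hypothetical stand-in for mlx5_hv_write_config().

/*
 * Sketch of the chunking loop in mlx5_hv_vhca_agent_write().
 * write_block() pretends the backend accepted every block.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DATA_WORDS 15			/* matches data[15] in the patch */

struct data_block_sketch {
	uint16_t sequence;
	uint16_t offset;
	uint8_t  reserved[4];
	uint64_t data[DATA_WORDS];	/* 120 bytes of payload per block */
};

static int write_block(const struct data_block_sketch *blk, int cfg_offset)
{
	printf("seq %u block %u -> cfg offset %d\n",
	       blk->sequence, blk->offset, cfg_offset);
	return 0;
}

static int agent_write_sketch(uint16_t *seq, int cfg_offset,
			      const void *buf, int len)
{
	int block_offset = 0, total = 0;

	while (len) {
		struct data_block_sketch blk = { 0 };
		int bytes = len < (int)sizeof(blk.data) ?
			    len : (int)sizeof(blk.data);
		int err;

		blk.sequence = *seq;
		blk.offset = block_offset++;
		memcpy(blk.data, (const uint8_t *)buf + total, bytes);

		err = write_block(&blk, cfg_offset);
		if (err)
			return err;

		total += bytes;
		len -= bytes;
	}

	(*seq)++;			/* one bump per complete message */
	return 0;
}

int main(void)
{
	uint8_t msg[300] = { 0 };	/* needs three 120-byte blocks */
	uint16_t seq = 0;

	return agent_write_sketch(&seq, 0, msg, sizeof(msg));
}
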
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
new file mode 100644
index 000000000000..4bad6a5fde56
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_VHCA_H__
+#define __LIB_HV_VHCA_H__
+
+#include "en.h"
+#include "lib/hv.h"
+
+struct mlx5_hv_vhca_agent;
+struct mlx5_hv_vhca;
+struct mlx5_hv_vhca_control_block;
+
+enum mlx5_hv_vhca_agent_type {
+ MLX5_HV_VHCA_AGENT_CONTROL = 0,
+ MLX5_HV_VHCA_AGENT_STATS = 1,
+ MLX5_HV_VHCA_AGENT_MAX = 32,
+};
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+struct mlx5_hv_vhca_control_block {
+ u32 capabilities;
+ u32 control;
+ u16 command;
+ u16 command_ack;
+ u16 version;
+ u16 rings;
+ u32 reserved1[28];
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev);
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca);
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask);
+
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context);
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent);
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len);
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent);
+
+#else
+
+static inline struct mlx5_hv_vhca *
+mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ return 0;
+}
+
+static inline void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline void mlx5_hv_vhca_invalidate(void *context,
+ u64 block_mask)
+{
+}
+
+static inline struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+}
+
+static inline int
+mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index b9d4f4e19ff9..148b55c3db7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
#include "mlx5_core.h"
@@ -48,7 +49,7 @@ struct mlx5_vxlan {
struct mlx5_vxlan_port {
struct hlist_node hlist;
- atomic_t refcount;
+ refcount_t refcount;
u16 udp_port;
};
@@ -113,7 +114,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
if (vxlanp) {
- atomic_inc(&vxlanp->refcount);
+ refcount_inc(&vxlanp->refcount);
return 0;
}
@@ -137,7 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
}
vxlanp->udp_port = port;
- atomic_set(&vxlanp->refcount, 1);
+ refcount_set(&vxlanp->refcount, 1);
spin_lock_bh(&vxlan->lock);
hash_add(vxlan->htable, &vxlanp->hlist, port);
@@ -170,7 +171,7 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
goto out_unlock;
}
- if (atomic_dec_and_test(&vxlanp->refcount)) {
+ if (refcount_dec_and_test(&vxlanp->refcount)) {
hash_del(&vxlanp->hlist);
remove = true;
}
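
The vxlan.c hunk above is a mechanical atomic_t to refcount_t conversion, but the lifetime it protects is the usual one: the port entry starts with one reference, repeated adds of the same UDP port take another, and only the final delete removes and frees it. A userspace sketch of that pattern follows (not part of the patch); C11 atomics stand in for refcount_t, which additionally saturates and warns on misuse.

/*
 * Sketch of the get/put lifetime around mlx5_vxlan_port: the last put
 * is the one that tears the object down.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct port_sketch {
	atomic_int refcount;
	unsigned short udp_port;
};

static struct port_sketch *port_create(unsigned short udp_port)
{
	struct port_sketch *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->udp_port = udp_port;
	atomic_init(&p->refcount, 1);		/* like refcount_set(..., 1) */
	return p;
}

static void port_get(struct port_sketch *p)
{
	atomic_fetch_add(&p->refcount, 1);	/* like refcount_inc() */
}

static bool port_put(struct port_sketch *p)
{
	/* like refcount_dec_and_test(): true only for the final reference */
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {
		free(p);
		return true;
	}
	return false;
}

int main(void)
{
	struct port_sketch *p = port_create(4789);

	port_get(p);					/* second user of the port */
	printf("first put frees? %d\n", port_put(p));	/* 0 */
	printf("last put frees? %d\n", port_put(p));	/* 1 */
	return 0;
}
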
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b15b27a497fc..dee1a8658c87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -69,6 +69,7 @@
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
+#include "lib/hv_vhca.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -495,6 +496,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
if (do_set)
err = set_caps(dev, set_ctx, set_sz,
@@ -826,11 +833,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eq_cleanup;
}
- err = mlx5_cq_debugfs_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize cq debugfs\n");
- goto err_events_cleanup;
- }
+ mlx5_cq_debugfs_init(dev);
mlx5_init_qp_table(dev);
@@ -874,6 +877,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->tracer = mlx5_fw_tracer_create(dev);
+ dev->hv_vhca = mlx5_hv_vhca_create(dev);
return 0;
@@ -891,7 +895,6 @@ err_tables_cleanup:
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
-err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
@@ -905,6 +908,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -1072,6 +1076,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fw_tracer;
}
+ mlx5_hv_vhca_init(dev->hv_vhca);
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1127,6 +1133,7 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_eq_table_destroy(dev);
@@ -1147,6 +1154,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
@@ -1217,8 +1225,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
- if (cleanup)
+ if (cleanup) {
+ mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
+ }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1379,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
mlx5_core_err(dev, "mlx5_unload_one failed\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 471bbc48bc1f..87b75b2207c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -146,7 +146,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b8ba74de9555..c3aea4cc2fff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
common = radix_tree_lookup(&table->tree, rsn);
if (common)
- atomic_inc(&common->refcount);
+ refcount_inc(&common->refcount);
spin_unlock_irqrestore(&table->lock, flags);
@@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
- if (atomic_dec_and_test(&common->refcount))
+ if (refcount_dec_and_test(&common->refcount))
complete(&common->free);
}
@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
common = mlx5_get_rsc(table, rsn);
if (!common) {
- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+ mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
return NOTIFY_OK;
}
@@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
if (err)
return err;
- atomic_set(&qp->common.refcount, 1);
+ refcount_set(&qp->common.refcount, 1);
init_completion(&qp->common.free);
qp->pid = current->pid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 17ce9dd56b13..18af6981e0be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -51,7 +51,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
return -ENOMEM;
}
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index c912d82ca64b..30f7848a6f88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
+ break;
+ /* fall through */
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
- break;
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 953cc8efba69..dd2315ce4441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -44,6 +44,11 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
return wq->fbc.sz_m1 + 1;
}
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
return (u32)wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index f1ec58c9e9e3..55791f71a778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -89,6 +89,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 06c80343d9ed..f458fd1ce9f8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -71,7 +71,7 @@ config MLXSW_SWITCHX2
module will be called mlxsw_switchx2.
config MLXSW_SPECTRUM
- tristate "Mellanox Technologies Spectrum support"
+ tristate "Mellanox Technologies Spectrum family support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
depends on BRIDGE || BRIDGE=n
@@ -87,8 +87,8 @@ config MLXSW_SPECTRUM
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
---help---
- This driver supports Mellanox Technologies Spectrum Ethernet
- Switch ASICs.
+ This driver supports Mellanox Technologies
+ Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs.
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 171b36bd8a4e..0e86a581d45b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
- spectrum_dpipe.o
+ spectrum_dpipe.o spectrum_trap.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 17ceac7505e5..963a2b4b61b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1017,6 +1017,54 @@ static int mlxsw_devlink_flash_update(struct devlink *devlink,
component, extack);
}
+static int mlxsw_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
+}
+
+static void mlxsw_devlink_trap_fini(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_fini)
+ return;
+ mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
+}
+
+static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_action_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
+}
+
+static int
+mlxsw_devlink_trap_group_init(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_init(mlxsw_core, group);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1034,6 +1082,10 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
.info_get = mlxsw_devlink_info_get,
.flash_update = mlxsw_devlink_flash_update,
+ .trap_init = mlxsw_devlink_trap_init,
+ .trap_fini = mlxsw_devlink_trap_fini,
+ .trap_action_set = mlxsw_devlink_trap_action_set,
+ .trap_group_init = mlxsw_devlink_trap_group_init,
};
static int
@@ -1477,6 +1529,18 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ listener->trap_group, listener->is_ctrl);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+EXPORT_SYMBOL(mlxsw_core_trap_action_set);
+
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
return atomic64_inc_return(&mlxsw_core->emad.tid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8efcff4b59cb..b65a17d49e43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -128,6 +128,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -289,6 +292,15 @@ struct mlxsw_driver {
int (*flash_update)(struct mlxsw_core *mlxsw_core,
const char *file_name, const char *component,
struct netlink_ext_ack *extack);
+ int (*trap_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ void (*trap_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+ int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 946339e13eb9..5b1323645a5d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ead36702549a..5494cf93f34c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4126,7 +4126,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
-#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
@@ -5422,6 +5421,14 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
+};
+
+enum mlxsw_reg_htgt_discard_trap_group {
+ MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
};
/* reg_htgt_trap_group
@@ -5559,6 +5566,8 @@ enum mlxsw_reg_hpkt_action {
MLXSW_REG_HPKT_ACTION_DISCARD,
MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT = 15,
};
/* reg_hpkt_action
@@ -5569,6 +5578,8 @@ enum mlxsw_reg_hpkt_action {
* 3 - Discard.
* 4 - Soft discard (allow other traps to act on the packet).
* 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * 6 - Trap to CPU (CPU receives sole copy) and count it as error.
+ * 15 - Restore the firmware's default action.
* Access: RW
*
* Note: Must be set to 0 (forward) for event trap IDs, as they are already
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eda9c23e87b2..91e4792bb7e7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -48,7 +48,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1122
+#define MLXSW_SP1_FWREV_SUBMINOR 1886
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -65,6 +65,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
+static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
@@ -174,6 +175,10 @@ struct mlxsw_sp_ptp_ops {
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info);
+ int (*get_stats_count)(void);
+ void (*get_stats_strings)(u8 **p);
+ void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
};
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
@@ -1625,7 +1630,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
}
flow_block_cb_incref(block_cb);
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
+ mlxsw_sp_port, ingress, f->extack);
if (err)
goto err_block_bind;
@@ -2328,6 +2333,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
static void mlxsw_sp_port_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
u8 *p = data;
int i;
@@ -2369,6 +2375,7 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++)
mlxsw_sp_port_get_tc_strings(&p, i);
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
break;
}
}
@@ -2463,6 +2470,7 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
static void mlxsw_sp_port_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int i, data_index = 0;
/* IEEE 802.3 Counters */
@@ -2503,13 +2511,21 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
}
+
+ /* PTP counters */
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
+ data, data_index);
+ data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
+ return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
default:
return -EOPNOTSUPP;
}
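
Note: the three new ptp_ops hooks follow the usual ethtool contract: get_stats_count() feeds get_sset_count(), get_stats_strings() must append exactly that many ETH_GSTRING_LEN entries, and get_stats() must write exactly that many u64 values starting at data_index, in the same order as the strings. For Spectrum-1 the hooks are implemented in spectrum_ptp.c further down; for Spectrum-2 the inline stubs in spectrum_ptp.h return a count of zero, so no extra counters are exposed there.
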
@@ -2608,26 +2624,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.speed = SPEED_50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
.speed = SPEED_100000,
@@ -2674,7 +2670,7 @@ mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode)
+ u8 width, unsigned long *mode)
{
int i;
@@ -2715,7 +2711,7 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
}
static u32
-mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
@@ -2729,7 +2725,8 @@ mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
return ptys_proto;
}
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
+static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
+ u32 speed)
{
u32 ptys_proto = 0;
int i;
@@ -2917,11 +2914,31 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
+#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
+#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+
+static u8 mlxsw_sp_port_mask_width_get(u8 width)
+{
+ switch (width) {
+ case 1:
+ return MLXSW_SP_PORT_MASK_WIDTH_1X;
+ case 2:
+ return MLXSW_SP_PORT_MASK_WIDTH_2X;
+ case 4:
+ return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
struct mlxsw_sp2_port_link_mode {
const enum ethtool_link_mode_bit_indices *mask_ethtool;
int m_ethtool_len;
u32 mask;
u32 speed;
+ u8 mask_width;
};
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
@@ -2929,72 +2946,97 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_2500,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_5000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_25000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_50000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
.speed = SPEED_50000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_100000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
.speed = SPEED_100000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_200000,
},
};
@@ -3022,12 +3064,14 @@ mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode)
+ u8 width, unsigned long *mode)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
+ (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
mode);
}
@@ -3078,27 +3122,32 @@ mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
}
static u32
-mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
+ mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
+static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
+ u8 width, u32 speed)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sp2_port_link_mode[i].speed)
+ if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
+ (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
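
Note: the mask_width gating above keeps a port from advertising link modes that need more (or fewer) lanes than its current mapping provides, e.g. a port split down to two lanes no longer reports the 4-lane-only 40G and CAUI-4 100G modes. A standalone sketch of the same filtering, compilable on its own and using illustrative names rather than the mlxsw ones:

#include <stdint.h>
#include <stdio.h>

#define WIDTH_1X (1 << 0)
#define WIDTH_2X (1 << 1)
#define WIDTH_4X (1 << 2)

struct link_mode { const char *name; uint8_t mask_width; };

static const struct link_mode modes[] = {
	{ "40G (4 lanes)",  WIDTH_4X },
	{ "50G (1 lane)",   WIDTH_1X },
	{ "50G (2 lanes)",  WIDTH_2X | WIDTH_4X },
};

static uint8_t width_to_mask(uint8_t width)
{
	switch (width) {
	case 1: return WIDTH_1X;
	case 2: return WIDTH_2X;
	case 4: return WIDTH_4X;
	default: return 0;
	}
}

int main(void)
{
	uint8_t mask = width_to_mask(2);	/* a port currently mapped to 2 lanes */

	for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (mask & modes[i].mask_width)
			printf("advertisable: %s\n", modes[i].name);
	return 0;	/* only the 2-lane-capable modes are printed */
}
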
@@ -3182,7 +3231,7 @@ mlxsw_sp2_port_type_speed_ops = {
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
- struct ethtool_link_ksettings *cmd)
+ u8 width, struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3193,12 +3242,13 @@ mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
- ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
+ cmd->link_modes.supported);
}
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
- u32 eth_proto_admin, bool autoneg,
+ u32 eth_proto_admin, bool autoneg, u8 width,
struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3209,7 +3259,7 @@ mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
return;
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
cmd->link_modes.advertising);
}
@@ -3264,10 +3314,11 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
- mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
+ mlxsw_sp_port->mapping.width, cmd);
mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
- cmd);
+ mlxsw_sp_port->mapping.width, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
@@ -3300,13 +3351,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
- if (!autoneg && cmd->base.speed == SPEED_56000) {
- netdev_err(dev, "56G not supported with autoneg off\n");
- return -EINVAL;
- }
eth_proto_new = autoneg ?
- ops->to_ptys_advert_link(mlxsw_sp, cmd) :
- ops->to_ptys_speed(mlxsw_sp, cmd->base.speed);
+ ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd) :
+ ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
@@ -4609,6 +4658,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp1_ptp_shaper_work,
.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp1_get_stats_count,
+ .get_stats_strings = mlxsw_sp1_get_stats_strings,
+ .get_stats = mlxsw_sp1_get_stats,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -4622,6 +4674,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
};
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
@@ -4664,6 +4719,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_traps_init;
}
+ err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
+ goto err_devlink_traps_init;
+ }
+
err = mlxsw_sp_buffers_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
@@ -4797,6 +4858,8 @@ err_span_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
+err_devlink_traps_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
mlxsw_sp_fids_fini(mlxsw_sp);
@@ -4869,6 +4932,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -5026,6 +5090,26 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params kvd_size_params;
+ u32 kvd_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
return mlxsw_sp1_resources_kvd_register(mlxsw_core);
@@ -5033,7 +5117,7 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return 0;
+ return mlxsw_sp2_resources_kvd_register(mlxsw_core);
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -5230,6 +5314,10 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -5260,6 +5348,43 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp3_driver = {
+ .kind = mlxsw_sp3_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -6304,6 +6429,16 @@ static struct pci_driver mlxsw_sp2_pci_driver = {
.id_table = mlxsw_sp2_pci_id_table,
};
+static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp3_pci_driver = {
+ .name = mlxsw_sp3_driver_name,
+ .id_table = mlxsw_sp3_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -6319,6 +6454,10 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_core_driver_register;
+ err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
+ if (err)
+ goto err_sp3_core_driver_register;
+
err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
goto err_sp1_pci_driver_register;
@@ -6327,11 +6466,19 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_pci_driver_register;
+ err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
+ if (err)
+ goto err_sp3_pci_driver_register;
+
return 0;
+err_sp3_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
+err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
@@ -6343,8 +6490,10 @@ err_sp1_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
@@ -6359,4 +6508,5 @@ MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6664119fb0c8..b2a0028b1694 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -225,6 +225,16 @@ struct mlxsw_sp_port_xstats {
u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};
+struct mlxsw_sp_ptp_port_dir_stats {
+ u64 packets;
+ u64 timestamps;
+};
+
+struct mlxsw_sp_ptp_port_stats {
+ struct mlxsw_sp_ptp_port_dir_stats rx_gcd;
+ struct mlxsw_sp_ptp_port_dir_stats tx_gcd;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
@@ -271,6 +281,7 @@ struct mlxsw_sp_port {
struct hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
+ struct mlxsw_sp_ptp_port_stats stats;
} ptp;
};
@@ -279,14 +290,14 @@ struct mlxsw_sp_port_type_speed_ops {
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode);
+ u8 width, unsigned long *mode);
u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 speed);
+ u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed);
int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u32 *base_speed);
@@ -623,7 +634,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
- u8 action_created:1;
+ u8 action_created:1,
+ egress_bind_blocker:1;
unsigned int counter_index;
};
@@ -642,6 +654,7 @@ struct mlxsw_sp_acl_block {
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
unsigned int disable_count;
+ unsigned int egress_blocker_rule_count;
struct net *net;
};
@@ -657,7 +670,8 @@ void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -955,4 +969,17 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_trap.c */
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 84a87d059333..150b3a144b83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -239,7 +239,8 @@ mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
int err;
@@ -247,6 +248,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
if (!binding)
return -ENOMEM;
@@ -672,6 +678,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -689,14 +696,14 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
* one, to be directly bound to device. The rest of the
* rulesets are bound by "Goto action set".
*/
- err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
goto err_ruleset_block_bind;
}
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
- ruleset->ht_key.block->rule_count++;
+ block->rule_count++;
+ block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
err_ruleset_block_bind:
@@ -712,7 +719,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
if (!ruleset->ht_key.chain_index &&
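
Note: the blocker works from both directions: rules that cannot run on egress set rulei->egress_bind_blocker, which mlxsw_sp_acl_rule_add()/mlxsw_sp_acl_rule_del() above fold into the block's egress_blocker_rule_count, and mlxsw_sp_acl_block_bind() refuses an egress bind while that count is non-zero; conversely, the flower parsing changes below refuse to add such a rule to a block that is already egress-bound.
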
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 202e9a246019..0ad1a24abfc6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -78,6 +78,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ /* Forbid block with this rulei to be bound
+ * to egress in future.
+ */
+ rulei->egress_bind_blocker = 1;
+
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
@@ -257,6 +267,12 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_tcp(rule, &match);
+ if (match.mask->flags & htons(0x0E00)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
+ dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
+ return -EINVAL;
+ }
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
ntohs(match.key->flags),
ntohs(match.mask->flags));
@@ -390,6 +406,12 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
+
+ /* Forbid block with this rulei to be bound
+ * to egress in future.
+ */
+ rulei->egress_bind_blocker = 1;
+
if (match.mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 38bb1cfe4e8c..ec2ff3d7f41c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -630,6 +630,8 @@ static void
mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp_ptp_port_dir_stats *stats;
+ struct mlxsw_sp_port *mlxsw_sp_port;
int err;
/* If an unmatched entry has an SKB, it has to be handed over to the
@@ -650,6 +652,17 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
+ mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ if (mlxsw_sp_port) {
+ stats = unmatched->key.ingress ?
+ &mlxsw_sp_port->ptp.stats.rx_gcd :
+ &mlxsw_sp_port->ptp.stats.tx_gcd;
+ if (unmatched->skb)
+ stats->packets++;
+ else
+ stats->timestamps++;
+ }
+
/* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
* the comment at that function states that it can only be called in
* soft IRQ context, this pattern of local_bh_disable() +
@@ -1098,3 +1111,57 @@ int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+
+struct mlxsw_sp_ptp_port_stat {
+ char str[ETH_GSTRING_LEN];
+ ptrdiff_t offset;
+};
+
+#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD) \
+ { \
+ .str = NAME, \
+ .offset = offsetof(struct mlxsw_sp_ptp_port_stats, \
+ FIELD), \
+ }
+
+static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets", rx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets", tx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
+};
+
+#undef MLXSW_SP_PTP_PORT_STAT
+
+#define MLXSW_SP_PTP_PORT_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
+
+int mlxsw_sp1_get_stats_count(void)
+{
+ return MLXSW_SP_PTP_PORT_STATS_LEN;
+}
+
+void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
+ ETH_GSTRING_LEN);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+ void *stats = &mlxsw_sp_port->ptp.stats;
+ ptrdiff_t offset;
+ int i;
+
+ data += data_index;
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ offset = mlxsw_sp_ptp_port_stats[i].offset;
+ *data++ = *(u64 *)(stats + offset);
+ }
+}
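
Note: the string table and the value loop are driven by the same array of (name, offsetof) pairs, so the reported names cannot get out of step with the fields they describe; the counters only increment in the GC path above, i.e. they count one-sided events where a packet or a timestamp aged out of the unmatched table without its counterpart. A standalone illustration of the offset-table pattern (plain userspace C, not mlxsw code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dir_stats { uint64_t packets, timestamps; };
struct port_stats { struct dir_stats rx_gcd, tx_gcd; };

static const struct { const char *name; ptrdiff_t offset; } descs[] = {
	{ "ptp_rx_gcd_packets",    offsetof(struct port_stats, rx_gcd.packets) },
	{ "ptp_rx_gcd_timestamps", offsetof(struct port_stats, rx_gcd.timestamps) },
	{ "ptp_tx_gcd_packets",    offsetof(struct port_stats, tx_gcd.packets) },
	{ "ptp_tx_gcd_timestamps", offsetof(struct port_stats, tx_gcd.timestamps) },
};

int main(void)
{
	struct port_stats stats = { .rx_gcd = { .packets = 3 }, .tx_gcd = { .timestamps = 1 } };
	char *base = (char *)&stats;

	/* The same table drives both the names and the values. */
	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s: %llu\n", descs[i].name,
		       (unsigned long long)*(uint64_t *)(base + descs[i].offset));
	return 0;
}
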
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 72e55f6926b9..8c386571afce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -59,6 +59,11 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info);
+int mlxsw_sp1_get_stats_count(void);
+void mlxsw_sp1_get_stats_strings(u8 **p);
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -125,6 +130,19 @@ static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int mlxsw_sp1_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
#endif
static inline struct mlxsw_sp_ptp_clock *
@@ -183,4 +201,18 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int mlxsw_sp2_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp2_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp2_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e618be7ce6c6..a330b369e899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2943,7 +2943,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
val = nh_grp->count;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
- val ^= nh->ifindex;
+ val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -2961,7 +2961,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
- val ^= dev->ifindex;
+ val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
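
Note: hashing each ifindex before folding it into val matters because a plain XOR of small integers collides easily: groups over interfaces {1, 2, 3} and {1, 4, 5} both XOR to 0, and consecutive ifindexes only ever differ in a few low bits. Running every ifindex through jhash() with the seed first spreads each value across all 32 bits, so such structured collisions in the nexthop-group rhashtable become unlikely.
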
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
new file mode 100644
index 000000000000..899450b28621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+#include <uapi/linux/devlink.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+
+#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *priv);
+
+#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
+ MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
+ false, SP_##_group_id, DISCARD)
+
+static struct devlink_trap mlxsw_sp_traps_arr[] = {
+ MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+};
+
+static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+};
+
+/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
+ * be mapped to the same devlink trap. Order is according to
+ * 'mlxsw_sp_listeners_arr'.
+ */
+static u16 mlxsw_sp_listener_devlink_map[] = {
+ DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+};
+
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ consume_skb(skb);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
+ ARRAY_SIZE(mlxsw_sp_listeners_arr)))
+ return -EINVAL;
+
+ return devlink_traps_register(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr),
+ mlxsw_sp);
+}
+
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr));
+}
+
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
+ }
+}
+
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ enum mlxsw_reg_hpkt_action hw_action;
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mlxsw_core_trap_action_set(mlxsw_core, listener,
+ hw_action);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+
+static int
+mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ enum mlxsw_reg_qpcr_ir_units ir_units;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 policer_id;
+ u8 burst_size;
+ bool is_bytes;
+ u32 rate;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
+ is_bytes = false;
+ rate = 10 * 1024; /* 10Kpps */
+ burst_size = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate,
+ burst_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+static int
+__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ u8 priority, tc, group_id;
+ u16 policer_id;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+}
+
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_trap_group_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ return 0;
+}
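
Note: with these callbacks wired up, the traps become visible through the generic devlink interface: "devlink trap show" lists them per device, "devlink -s trap show" adds the packet and byte counters, and an individual trap can be switched between the firmware's default drop behaviour and reporting to the CPU with something along the lines of "devlink trap set pci/0000:03:00.0 trap ingress_vlan_filter action trap" (device and trap names here are only illustrative). DEVLINK_TRAP_ACTION_TRAP maps to the new TRAP_EXCEPTION_TO_CPU hardware action and DEVLINK_TRAP_ACTION_DROP restores the firmware default via SET_FW_DEFAULT, as implemented in mlxsw_sp_trap_action_set() above.
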
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index bdab96f5bc70..1c14c051ee52 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -637,12 +637,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 19202bdb5105..7618f084cae9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -66,6 +66,13 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_STP = 0x14A,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index ccd06702cc56..da329ca115cc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -580,9 +580,7 @@ out:
dma_unmap_single(adapter->dev, sg_dma_address(sg),
DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
sg_dma_address(sg) = 0;
- if (ctl->skb)
- dev_kfree_skb(ctl->skb);
-
+ dev_kfree_skb(ctl->skb);
ctl->skb = NULL;
printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index e52b015e31a9..a41a90c589db 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1225,7 +1225,6 @@ MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
static int ks8851_probe(struct platform_device *pdev)
{
int err;
- struct resource *io_d, *io_c;
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
@@ -1240,15 +1239,13 @@ static int ks8851_probe(struct platform_device *pdev)
ks = netdev_priv(netdev);
ks->netdev = netdev;
- io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
+ ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks->hw_addr)) {
err = PTR_ERR(ks->hw_addr);
goto err_free;
}
- io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
+ ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ks->hw_addr_cmd)) {
err = PTR_ERR(ks->hw_addr_cmd);
goto err_free;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 13e6bf13ac4d..15a8be6bad27 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
}
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
- const struct skb_frag_struct *fragment,
+ const skb_frag_t *fragment,
unsigned int frame_length)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
goto finish;
for (j = 0; j < nr_frags; j++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
- frag = &(skb_shinfo(skb)->frags[j]);
if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
/* upon error no need to call
* lan743x_tx_frame_end
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index b2109eca81fd..57b26c2acf87 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -963,8 +963,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
index++) {
struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ptp->tx_ts_skb_queue[index] = NULL;
ptp->tx_ts_seconds_queue[index] = 0;
ptp->tx_ts_nseconds_queue[index] = 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 6932e615d4b0..4d1bce4389c7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -538,7 +539,7 @@ static int ocelot_port_stop(struct net_device *dev)
*/
static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
{
- ifh[0] = IFH_INJ_BYPASS;
+ ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
ifh[1] = (0xf00 & info->port) >> 8;
ifh[2] = (0xff & info->port) << 24;
ifh[3] = (info->tag_type << 16) | info->vid;
@@ -548,6 +549,7 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
u32 val, ifh[IFH_LEN];
@@ -566,6 +568,14 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.port = BIT(port->chip_port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
+
+ /* Check if timestamping is needed */
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
+ info.rew_op = port->ptp_cmd;
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (port->ts_id % 4) << 3;
+ }
+
ocelot_gen_ifh(ifh, &info);
for (i = 0; i < IFH_LEN; i++)
@@ -596,11 +606,58 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_any(skb);
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
+ port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct ocelot_skb *oskb =
+ kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
+
+ if (unlikely(!oskb))
+ goto out;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ oskb->skb = skb;
+ oskb->id = port->ts_id % 4;
+ port->ts_id++;
+
+ list_add_tail(&oskb->head, &port->skbs);
+
+ return NETDEV_TX_OK;
+ }
+
+out:
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
struct ocelot_port *port = netdev_priv(dev);
@@ -917,6 +974,97 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
+ * need to update the origin time.
+ */
+ port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ /* The function is only used for PTP operations for now */
+ if (!ocelot->ptp)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(port, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -933,6 +1081,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
+ .ndo_do_ioctl = ocelot_ioctl,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
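
Note: the new ndo_do_ioctl handler lets standard PTP tooling drive the switch ports directly: ptp4l (or hwstamp_ctl from the same linuxptp package) issues SIOCSHWTSTAMP to pick the tx_type and rx_filter accepted by ocelot_hwstamp_set() above, and "ethtool -T <port>" reports the capabilities advertised by the get_ts_info callback added below. One-step sync maps to IFH_REW_OP_ORIGIN_PTP, while two-step mode relies on the skb list and timestamp FIFO handling added to ocelot_port_xmit() earlier in this patch.
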
@@ -1014,12 +1163,37 @@ static int ocelot_get_sset_count(struct net_device *dev, int sset)
return ocelot->num_stats;
}
+static int ocelot_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ts_info = ocelot_get_ts_info,
};
static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
@@ -1629,6 +1803,196 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
+static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back using ocelot_ptp_settime64 which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+
+static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+};
+
+static int ocelot_init_timestamp(struct ocelot *ocelot)
+{
+ ocelot->ptp_info = ocelot_ptp_clock_info;
+ ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ocelot->ptp_clock))
+ return PTR_ERR(ocelot->ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ocelot->ptp_clock)
+ return 0;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx timestamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
@@ -1661,6 +2025,8 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
+ INIT_LIST_HEAD(&ocelot_port->skbs);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
@@ -1684,7 +2050,7 @@ EXPORT_SYMBOL(ocelot_probe_port);
int ocelot_init(struct ocelot *ocelot)
{
u32 port;
- int i, cpu = ocelot->num_phys_ports;
+ int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
@@ -1699,6 +2065,8 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
mutex_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->ptp_lock);
+ spin_lock_init(&ocelot->ptp_clock_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -1812,16 +2180,43 @@ int ocelot_init(struct ocelot *ocelot)
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
+
+ if (ocelot->ptp) {
+ ret = ocelot_init_timestamp(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ return ret;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ struct list_head *pos, *tmp;
+ struct ocelot_port *port;
+ struct ocelot_skb *entry;
+ int i;
+
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
ocelot_ace_deinit();
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ port = ocelot->ports[i];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+
+ list_del(pos);
+ dev_kfree_skb_any(entry->skb);
+ kfree(entry);
+ }
+ }
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index f7eeb4806897..e40773c01a44 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,9 +11,11 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
@@ -23,6 +25,7 @@
#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
+#include "ocelot_ptp.h"
#define PGID_AGGR 64
#define PGID_SRC 80
@@ -38,14 +41,17 @@
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+#define OCELOT_PTP_QUEUE_SZ 128
+
#define IFH_LEN 4
struct frame_info {
u32 len;
u16 port;
u16 vid;
- u8 cpuq;
u8 tag_type;
+ u16 rew_op;
+ u32 timestamp; /* rew_val */
};
#define IFH_INJ_BYPASS BIT(31)
@@ -54,6 +60,12 @@ struct frame_info {
#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
#define OCELOT_SPEED_100 2
@@ -71,6 +83,7 @@ enum ocelot_target {
SYS,
S2,
HSIO,
+ PTP,
TARGET_MAX,
};
@@ -343,6 +356,13 @@ enum ocelot_reg {
S2_CACHE_ACTION_DAT,
S2_CACHE_CNT_DAT,
S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
};
enum ocelot_regfield {
@@ -393,6 +413,13 @@ enum ocelot_regfield {
REGFIELD_MAX
};
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -442,6 +469,13 @@ struct ocelot {
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ struct mutex ptp_lock; /* Protects the PTP interface state */
+ spinlock_t ptp_clock_lock; /* Protects the PTP clock */
};
struct ocelot_port {
@@ -465,6 +499,16 @@ struct ocelot_port {
struct phy *serdes;
struct ocelot_port_tc tc;
+
+ u8 ptp_cmd;
+ struct list_head skbs;
+ u8 ts_id;
+};
+
+struct ocelot_skb {
+ struct list_head head;
+ struct sk_buff *skb;
+ u8 id;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
@@ -509,4 +553,7 @@ extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
+
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2451d4a96490..b063eb78fa0c 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -16,24 +16,27 @@
#include "ocelot.h"
-static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
+#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+
+static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
- int i;
u8 llen, wlen;
+ u64 ifh[2];
+
+ ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
+ ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
- /* The IFH is in network order, switch to CPU order */
- for (i = 0; i < IFH_LEN; i++)
- ifh[i] = ntohl((__force __be32)ifh[i]);
+ wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
+ llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
- wlen = (ifh[1] >> 7) & 0xff;
- llen = (ifh[1] >> 15) & 0x3f;
info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
- info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
+ info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
+
+ info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
- info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & BIT(16)) >> 16;
- info->vid = ifh[3] & GENMASK(11, 0);
+ info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
+ info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
return 0;
}
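Illustrative aside, not part of the patch: IFH_EXTRACT_BITFIELD64() shifts a 64-bit IFH word down to the requested offset and masks it to the field width, which is all the wlen/llen/timestamp/port/tag_type/vid reads above rely on. A small example with a made-up IFH word:

	u64 ifh0 = 0x2a80ULL;				/* example value only */
	u8 wlen = IFH_EXTRACT_BITFIELD64(ifh0, 7, 8);	/* bits 14..7  -> 0x55 */
	u8 llen = IFH_EXTRACT_BITFIELD64(ifh0, 15, 6);	/* bits 20..15 -> 0x00 */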
@@ -91,13 +94,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_NONE;
do {
- struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 tod_in_ns, full_ts_in_ns;
+ struct frame_info info = {};
struct net_device *dev;
- u32 *buf;
+ u32 ifh[4], val, *buf;
+ struct timespec64 ts;
int sz, len, buf_len;
- u32 ifh[4];
- u32 val;
- struct frame_info info;
+ struct sk_buff *skb;
for (i = 0; i < IFH_LEN; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
@@ -144,6 +148,22 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
break;
}
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < info.timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ info.timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ info.timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
*/
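Illustrative aside, not part of the patch: the IFH carries only the low 32 bits of the time of day, so the block above splices the current TOD's upper half onto the extracted value and borrows one from that upper half when the counter has wrapped since the frame was stamped. The same logic as a standalone sketch (hypothetical helper name):

static u64 ocelot_expand_rx_ts(u64 tod_in_ns, u32 ifh_ts)
{
	u64 full = tod_in_ns & GENMASK_ULL(63, 32);

	if ((u32)tod_in_ns < ifh_ts)	/* TOD wrapped after stamping */
		full -= 1ULL << 32;

	return full | ifh_ts;
}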
@@ -163,6 +183,66 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+ struct ocelot *ocelot = arg;
+
+ while (budget--) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct list_head *pos, *tmp;
+ struct sk_buff *skb = NULL;
+ struct ocelot_skb *entry;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+ if (entry->id != id)
+ continue;
+
+ skb = entry->skb;
+
+ list_del(pos);
+ kfree(entry);
+ }
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+
+ if (unlikely(!skb))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ dev_kfree_skb_any(skb);
+ }
+
+ return IRQ_HANDLED;
+}
+
static const struct of_device_id mscc_ocelot_match[] = {
{ .compatible = "mscc,vsc7514-switch" },
{ }
@@ -171,17 +251,18 @@ MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static int mscc_ocelot_probe(struct platform_device *pdev)
{
- int err, irq;
- unsigned int i;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ int err, irq_xtr, irq_ptp_rdy;
struct ocelot *ocelot;
struct regmap *hsio;
+ unsigned int i;
u32 val;
struct {
enum ocelot_target id;
char *name;
+ u8 optional:1;
} res[] = {
{ SYS, "sys" },
{ REW, "rew" },
@@ -189,6 +270,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ ANA, "ana" },
{ QS, "qs" },
{ S2, "s2" },
+ { PTP, "ptp", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -205,8 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct regmap *target;
target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
- if (IS_ERR(target))
+ if (IS_ERR(target)) {
+ if (res[i].optional) {
+ ocelot->targets[res[i].id] = NULL;
+ continue;
+ }
+
return PTR_ERR(target);
+ }
ocelot->targets[res[i].id] = target;
}
@@ -223,16 +311,29 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
return err;
- irq = platform_get_irq_byname(pdev, "xtr");
- if (irq < 0)
+ irq_xtr = platform_get_irq_byname(pdev, "xtr");
+ if (irq_xtr < 0)
return -ENODEV;
- err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
return err;
+ irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
+ if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
+ err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
+ ocelot_ptp_rdy_irq_handler,
+ IRQF_ONESHOT, "ptp ready",
+ ocelot);
+ if (err)
+ return err;
+
+ /* Both the PTP interrupt and the PTP bank are available */
+ ocelot->ptp = 1;
+ }
+
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
new file mode 100644
index 000000000000..9ede14a12573
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_PTP_H_
+#define _MSCC_OCELOT_PTP_H_
+
+#define PTP_PIN_CFG_RSZ 0x20
+#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
+
+#define PTP_PIN_CFG_DOM BIT(0)
+#define PTP_PIN_CFG_SYNC BIT(2)
+#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
+#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_NOSYNC,
+ PTP_PIN_ACTION_SYNC,
+};
+
+#define PTP_CFG_MISC_PTP_EN BIT(2)
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
+#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
+
+#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 6c387f994ec5..e59977d20400 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -234,6 +234,16 @@ static const u32 ocelot_s2_regmap[] = {
REG(S2_CACHE_TG_DAT, 0x000388),
};
+static const u32 ocelot_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
@@ -241,6 +251,7 @@ static const u32 *ocelot_regmap[] = {
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
[S2] = ocelot_s2_regmap,
+ [PTP] = ocelot_ptp_regmap,
};
static const struct reg_field ocelot_regfields[] = {
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 337b0cbfd153..c979f38a2e0c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
u8 *va;
struct vlan_ethhdr *veh;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
__wsum vsum;
va = addr;
@@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN;
frag = skb_shinfo(skb)->frags;
- frag->page_offset += VLAN_HLEN;
- skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+ skb_frag_off_add(frag, VLAN_HLEN);
+ skb_frag_size_sub(frag, VLAN_HLEN);
}
}
@@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
- struct skb_frag_struct *rx_frags;
+ skb_frag_t *rx_frags;
struct myri10ge_rx_buf *rx;
int i, idx, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
@@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
return 0;
}
rx_frags = skb_shinfo(skb)->frags;
- /* Fill skb_frag_struct(s) with data from our receive */
+ /* Fill skb_frag_t(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
skb_fill_page_desc(skb, i, rx->info[idx].page,
@@ -1364,8 +1364,8 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
/* remove padding */
- rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
skb->len = len;
@@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
struct myri10ge_tx_buf *tx;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct netdev_queue *netdev_queue;
dma_addr_t bus;
u32 low;
@@ -3037,7 +3037,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
- int error = 0;
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
@@ -3049,7 +3048,7 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
} else
dev->mtu = new_mtu;
- return error;
+ return 0;
}
/*
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 5a54fe848de4..1b019fdfcd97 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,10 +2,12 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -25,6 +27,80 @@
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_mpls);
+ u32 mpls_lse = 0;
+
+ push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+ push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* BOS is optional in the TC action but required for offload. */
+ if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+ return -EOPNOTSUPP;
+ }
+
+ /* Leave MPLS TC at its default value of 0 if not explicitly set. */
+ if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+ mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+ /* Proto, label and TTL are enforced and verified for MPLS push. */
+ mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+ mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+ push_mpls->ethtype = act->mpls_push.proto;
+ push_mpls->lse = cpu_to_be32(mpls_lse);
+
+ return 0;
+}
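Illustrative aside, not part of the patch: the 32-bit label stack entry assembled above follows the RFC 3032 layout exposed by <linux/mpls.h>: label in bits 31-12 (MPLS_LS_LABEL_SHIFT), TC in bits 11-9 (MPLS_LS_TC_SHIFT), bottom-of-stack in bit 8 (MPLS_LS_S_SHIFT) and TTL in bits 7-0. For example:

	/* label 100, TC 0, BOS 1, TTL 64 */
	u32 lse = (100 << MPLS_LS_LABEL_SHIFT) |
		  (1 << MPLS_LS_S_SHIFT) |
		  (64 << MPLS_LS_TTL_SHIFT);	/* == 0x00064140 */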
+
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+ pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+ pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_set_mpls);
+ u32 mpls_lse = 0, mpls_mask = 0;
+
+ set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+ set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+ mpls_mask |= MPLS_LS_LABEL_MASK;
+ }
+ if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+ mpls_mask |= MPLS_LS_TC_MASK;
+ }
+ if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+ mpls_mask |= MPLS_LS_S_MASK;
+ }
+ if (act->mpls_mangle.ttl) {
+ mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+ mpls_mask |= MPLS_LS_TTL_MASK;
+ }
+
+ set_mpls->lse = cpu_to_be32(mpls_lse);
+ set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -97,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
- struct netlink_ext_ack *extack)
+ bool pkt_host, struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -142,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
return gid;
}
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+ } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+ return -EOPNOTSUPP;
+ }
+
+ if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_flow->pre_tun_rule.dev = out_dev;
+
+ return 0;
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
@@ -809,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated,
+ int *out_cnt, u32 *csum_updated, bool pkt_host,
struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
@@ -831,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app,
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt, extack);
+ tun_out_cnt, pkt_host, extack);
if (err)
return err;
@@ -863,30 +953,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
- struct nfp_flower_pedit_acts *set_act,
+ struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_pop_mpls *pop_m;
+ struct nfp_fl_set_mpls *set_m;
int err;
switch (act->id) {
case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
break;
+ case FLOW_ACTION_REDIRECT_INGRESS:
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
@@ -975,6 +1072,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*/
*csum_updated &= ~act->csum_flags;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (*a_len +
+ sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ err = nfp_fl_push_mpls(psh_m, act, extack);
+ if (err)
+ return err;
+ *a_len += sizeof(struct nfp_fl_push_mpls);
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_pop_mpls(pop_m, act);
+ *a_len += sizeof(struct nfp_fl_pop_mpls);
+ break;
+ case FLOW_ACTION_MPLS_MANGLE:
+ if (*a_len +
+ sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_set_mpls(set_m, act);
+ *a_len += sizeof(struct nfp_fl_set_mpls);
+ break;
+ case FLOW_ACTION_PTYPE:
+ /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+ if (act->ptype != PACKET_HOST)
+ return -EOPNOTSUPP;
+
+ *pkt_host = true;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
@@ -1030,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
+ bool pkt_host = false;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -1046,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
- &set_act, extack, i);
+ &set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 0f1706ae5bfc..7eb2ec8969c3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -68,8 +68,11 @@
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
+#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
@@ -217,7 +220,8 @@ struct nfp_fl_set_ipv4_tun {
__be16 tun_flags;
u8 ttl;
u8 tos;
- __be32 extra;
+ __be16 outer_vlan_tpid;
+ __be16 outer_vlan_tci;
u8 tun_len;
u8 res2;
__be16 tun_proto;
@@ -232,6 +236,24 @@ struct nfp_fl_push_geneve {
u8 opt_data[];
};
+struct nfp_fl_push_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+ __be32 lse;
+};
+
+struct nfp_fl_pop_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+};
+
+struct nfp_fl_set_mpls {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 lse_mask;
+ __be32 lse;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -462,6 +484,7 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index eb846133943b..7a20447cca19 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app)
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
+ app_priv->pre_tun_rule_cnt = 0;
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index af9441d5787f..31d94592a7c0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -42,6 +42,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -162,6 +163,7 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -193,6 +195,7 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ int pre_tun_rule_cnt;
};
/**
@@ -218,6 +221,7 @@ struct nfp_fl_qos {
* @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
* @qos_table: Stored info on filters implementing qos
+ * @on_bridge: Indicates if the repr is attached to a bridge
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
@@ -227,6 +231,7 @@ struct nfp_flower_repr_priv {
bool block_shared;
struct list_head mac_list;
struct nfp_fl_qos qos_table;
+ bool on_bridge;
};
/**
@@ -280,6 +285,11 @@ struct nfp_fl_payload {
char *action_data;
struct list_head linked_flows;
bool in_hw;
+ struct {
+ struct net_device *dev;
+ __be16 vlan_tci;
+ __be16 port_idx;
+ } pre_tun_rule;
};
struct nfp_fl_payload_link {
@@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}
+static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
+{
+ return netif_is_ovs_master(netdev);
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
@@ -415,4 +430,8 @@ void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev);
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 457bdc60f3ee..987ae221f6be 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -61,6 +61,11 @@
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_IPV4)
+
struct nfp_flower_merge_check {
union {
struct {
@@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
+ flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
@@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
return act_off;
}
-static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+ *vlan = (struct nfp_fl_push_vlan *)a;
+ else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
return -EOPNOTSUPP;
act_off += a->len_lw << NFP_FL_LW_SIZ;
}
+ /* Ensure any VLAN push also has an egress action. */
+ if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+ return -EOPNOTSUPP;
+
return 0;
}
static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+ struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
+ tun = (struct nfp_fl_set_ipv4_tun *)a;
+ tun->outer_vlan_tpid = vlan->vlan_tpid;
+ tun->outer_vlan_tci = vlan->vlan_tci;
+
+ return 0;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ /* Return error if no tunnel action is found. */
+ return -EOPNOTSUPP;
+}
+
+static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2,
struct nfp_fl_payload *merge_flow)
{
unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
bool tunnel_act = false;
char *merge_act;
int err;
@@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
- * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
+ * valid merge.
*/
if (tunnel_act) {
char *post_tun_acts = &sub_flow2->action_data[pre_off2];
- err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+ &post_tun_push_vlan);
if (err)
return err;
+
+ if (post_tun_push_vlan) {
+ pre_off2 += sizeof(*post_tun_push_vlan);
+ sub2_act_len -= sizeof(*post_tun_push_vlan);
+ }
}
/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+
+ if (post_tun_push_vlan) {
+ /* Update tunnel action in merge to include VLAN push. */
+ err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+ post_tun_push_vlan);
+ if (err)
+ return err;
+
+ merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+ }
+
merge_act += sub1_act_len;
memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
@@ -945,6 +1003,106 @@ err_destroy_merge_flow:
}
/**
+ * nfp_flower_validate_pre_tun_rule()
+ * @app: Pointer to the APP handle
+ * @flow: Pointer to NFP flow representation of rule
+ * @extack: Netlink extended ACK report
+ *
+ * Verifies the flow as a pre-tunnel rule.
+ *
+ * Return: negative value on error, 0 if verified.
+ */
+static int
+nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ struct nfp_fl_payload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_mac_mpls *mac;
+ struct nfp_fl_act_head *act;
+ u8 *mask = flow->mask_data;
+ bool vlan = false;
+ int act_offset;
+ u8 key_layer;
+
+ meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
+
+ key_layer = meta_tci->nfp_flow_key_layer;
+ if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
+ return -EOPNOTSUPP;
+ }
+
+ /* Skip fields known to exist. */
+ mask += sizeof(struct nfp_flower_meta_tci);
+ mask += sizeof(struct nfp_flower_in_port);
+
+ /* Ensure destination MAC address is fully matched. */
+ mac = (struct nfp_flower_mac_mpls *)mask;
+ if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
+ return -EOPNOTSUPP;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
+ int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int i;
+
+ mask += sizeof(struct nfp_flower_mac_mpls);
+
+ /* Ensure proto and flags are the only IP layer fields. */
+ for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ if (mask[i] && i != ip_flags && i != ip_proto) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Action must be a single egress or pop_vlan and egress. */
+ act_offset = 0;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ if (vlan) {
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ }
+
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+
+ /* Ensure there are no more actions after egress. */
+ if (act_offset != flow->meta.act_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
* @netdev: netdev structure.
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
+ if (flow_pay->pre_tun_rule.dev) {
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ if (err)
+ goto err_destroy_flow;
+ }
+
err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
@@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_release_metadata;
}
- err = nfp_flower_xmit_flow(app, flow_pay,
- NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (flow_pay->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+ else
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
@@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_merge_flow;
}
- err = nfp_flower_xmit_flow(app, nfp_flow,
- NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (nfp_flow->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+ else
+ err = nfp_flower_xmit_flow(app, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
/* Fall through on error. */
err_free_merge_flow:
@@ -1487,16 +1657,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
return NOTIFY_OK;
if (event == NETDEV_REGISTER) {
- err = __tc_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
+ err = __flow_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
if (err)
nfp_flower_cmsg_warn(app,
"Indirect block reg failed - %s\n",
netdev->name);
} else if (event == NETDEV_UNREGISTER) {
- __tc_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb, app);
+ __flow_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ app);
}
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index f0ee982eb1b5..2600ce476d6b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -15,6 +15,24 @@
#define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
+#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags: options for the rule offload
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+ __be32 flags;
+ __be16 port_idx;
+ __be16 vlan_tci;
+ __be32 host_ctx_id;
+};
+
/**
* struct nfp_tun_active_tuns - periodic message of active tunnels
* @seq: sequence number of the message
@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
/**
* struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
- * @ht_node: Hashtable entry
- * @addr: Offloaded MAC address
- * @index: Offloaded index for given MAC address
- * @ref_count: Number of devs using this MAC address
- * @repr_list: List of reprs sharing this MAC address
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
*/
struct nfp_tun_offloaded_mac {
struct rhash_head ht_node;
@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
u16 index;
int ref_count;
struct list_head repr_list;
+ int bridge_count;
};
static const struct rhashtable_params offloaded_macs_params = {
@@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+ } else if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count++;
}
entry->ref_count++;
@@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
- nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
- return 0;
+ if (entry->bridge_count ||
+ !nfp_flower_is_supported_bridge(netdev)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+ netdev, mod);
+ return 0;
+ }
+
+ /* MAC is global but matches need to go to pre_tun table. */
+ nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
}
- /* Assign a global index if non-repr or MAC address is now shared. */
- if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (ida_idx < 0)
- return ida_idx;
+ if (!nfp_mac_idx) {
+ /* Assign a global index if non-repr or MAC is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
- nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
- } else {
- nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ nfp_mac_idx =
+ nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+
+ } else {
+ nfp_mac_idx =
+ nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ }
}
if (!entry) {
@@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
list_del(&repr_priv->mac_list);
}
+ if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count--;
+
+ if (!entry->bridge_count && entry->ref_count) {
+ u16 nfp_mac_idx;
+
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+ false)) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
+ }
+
+ entry->index = nfp_mac_idx;
+ return 0;
+ }
+ }
+
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
@@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
repr_priv = repr->app_priv;
+ if (repr_priv->on_bridge)
+ return 0;
+
mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
@@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEUPPER) {
+ /* If a repr is attached to a bridge then tunnel packets
+ * entering the physical port are directed through the bridge
+ * datapath and cannot be directly detunneled. Therefore,
+ * associated offloaded MACs and indexes should not be used
+ * by fw for detunneling.
+ */
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev) ||
+ !nfp_flower_is_supported_bridge(upper))
+ return NOTIFY_OK;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return NOTIFY_OK;
+
+ repr_priv = repr->app_priv;
+
+ if (info->linking) {
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL))
+ nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
+ netdev_name(netdev));
+ repr_priv->on_bridge = true;
+ } else {
+ repr_priv->on_bridge = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD))
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ }
}
return NOTIFY_OK;
}
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_offloaded_mac *mac_entry;
+ struct nfp_tun_pre_tun_rule payload;
+ struct net_device *internal_dev;
+ int err;
+
+ if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+ return -ENOSPC;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ internal_dev = flow->pre_tun_rule.dev;
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.host_ctx_id = flow->meta.host_ctx_id;
+
+ /* Lookup MAC index for the pre-tunnel rule egress device.
+ * Note that because the device is always an internal port, it will
+ * have a constant global index so does not need to be tracked.
+ */
+ mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+ internal_dev->dev_addr);
+ if (!mac_entry)
+ return -ENOENT;
+
+ payload.port_idx = cpu_to_be16(mac_entry->index);
+
+ /* Copy mac id and vlan to flow - dev may not exist at delete time. */
+ flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+ flow->pre_tun_rule.port_idx = payload.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt++;
+
+ return 0;
+}
+
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_pre_tun_rule payload;
+ u32 tmp_flags = 0;
+ int err;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+ payload.flags = cpu_to_be32(tmp_flags);
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.port_idx = flow->pre_tun_rule.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt--;
+
+ return 0;
+}
+
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 60e57f08de80..81679647e842 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -815,6 +815,8 @@ static void __exit nfp_main_exit(void)
module_init(nfp_main_init);
module_exit(nfp_main_exit);
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9903805717da..6f97b554f7da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
@@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct nfp_net_tx_buf *tx_buf;
struct sk_buff *skb;
int fidx, nr_frags;
@@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct netdev_queue *nd_q;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index ab7f2498e1c4..553c708694e8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
- return;
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(queues))
- return;
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
xdp = debugfs_create_dir("xdp", queues);
- if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
- return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
@@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
- struct dentry *dev_dir;
-
- if (IS_ERR_OR_NULL(nfp_dir))
- return NULL;
-
- dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
- if (IS_ERR_OR_NULL(dev_dir))
- return NULL;
-
- return dev_dir;
+ return debugfs_create_dir(pci_name(pdev), nfp_dir);
}
void nfp_net_debugfs_dir_clean(struct dentry **dir)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 3d73970b3a2e..219b0b863c89 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -993,14 +993,12 @@ static int w90p910_ether_probe(struct platform_device *pdev)
ether->txirq = platform_get_irq(pdev, 0);
if (ether->txirq < 0) {
- dev_err(&pdev->dev, "failed to get ether tx irq\n");
error = -ENXIO;
goto failed_free_io;
}
ether->rxirq = platform_get_irq(pdev, 1);
if (ether->rxirq < 0) {
- dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
goto failed_free_io;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b327b29f5d57..ecca794c55e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6126,8 +6126,7 @@ static void nv_remove(struct pci_dev *pci_dev)
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 6f8d6584f809..5113ee647090 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1258,8 +1258,7 @@ static int yellowfin_close(struct net_device *dev)
yp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
- if (yp->tx_skbuff[i])
- dev_kfree_skb(yp->tx_skbuff[i]);
+ dev_kfree_skb(yp->tx_skbuff[i]);
yp->tx_skbuff[i] = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a391cf6ee4b2..55a29ec76680 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,15 +66,6 @@ config QLCNIC_HWMON
This data is available via the hwmon sysfs interface.
-config QLGE
- tristate "QLogic QLGE 10Gb Ethernet Driver Support"
- depends on PCI
- ---help---
- This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called qlge.
-
config NETXEN_NIC
tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index 6cd2e333a5fc..1ae4a0743bd5 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
-obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_QED) += qed/
obj-$(CONFIG_QEDE)+= qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 58e2eaf77014..c692a41e4548 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev,
struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
struct netxen_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
@@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pci_dev *pdev;
int i, k;
int delta = 0;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u32 producer;
int frag_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 5ea6c4fc6050..859caa6c1a1f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1756,6 +1756,15 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Sets the value of the specified GRC param */
+static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ dev_data->grc.param_val[grc_param] = val;
+}
+
/* Returns the value of the specified GRC param */
static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
enum dbg_grc_params grc_param)
@@ -5119,6 +5128,69 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
return false;
}
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ enum dbg_status status;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
+
+ status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Initializes the GRC parameters (if not initialized). Needed in order
+ * to set the default parameter values for the first time.
+ */
+ qed_dbg_grc_init_params(p_hwfn);
+
+ if (grc_param >= MAX_DBG_GRC_PARAMS)
+ return DBG_STATUS_INVALID_ARGS;
+ if (val < s_grc_param_defs[grc_param].min ||
+ val > s_grc_param_defs[grc_param].max)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (s_grc_param_defs[grc_param].is_preset) {
+ /* Preset param */
+
+ /* Disabling a preset is not allowed. Call
+ * dbg_grc_set_params_default instead.
+ */
+ if (!val)
+ return DBG_STATUS_INVALID_ARGS;
+
+ /* Update all params with the preset values */
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ u32 preset_val;
+
+ /* Skip persistent params */
+ if (s_grc_param_defs[i].is_persistent)
+ continue;
+
+ /* Find preset value */
+ if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
+ preset_val =
+ s_grc_param_defs[i].exclude_all_preset_val;
+ else if (grc_param == DBG_GRC_PARAM_CRASH)
+ preset_val =
+ s_grc_param_defs[i].crash_preset_val;
+ else
+ return DBG_STATUS_INVALID_ARGS;
+
+ qed_grc_set_param(p_hwfn,
+ (enum dbg_grc_params)i, preset_val);
+ }
+ } else {
+ /* Regular param - set its value */
+ qed_grc_set_param(p_hwfn, grc_param, val);
+ }
+
+ return DBG_STATUS_OK;
+}
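Illustrative aside, not part of the patch: a caller holding a PTT window could use the new helper to apply one of the presets referenced above, e.g. the crash preset, before collecting a GRC dump. A minimal sketch using the existing qed_ptt_acquire()/qed_ptt_release() helpers:

	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

	if (p_ptt) {
		/* A non-zero value selects the preset; presets cannot be disabled */
		qed_dbg_grc_config(p_hwfn, p_ptt, DBG_GRC_PARAM_CRASH, 1);
		qed_ptt_release(p_hwfn, p_ptt);
	}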
+
/* Assign default GRC param values */
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
{
@@ -7997,9 +8069,16 @@ static u32 qed_calc_regdump_header(enum debug_print_features feature,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
int rc;
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ grc_params[i] = dev_data->grc.param_val[i];
+
if (cdev->num_hwfns == 1)
omit_engine = 1;
@@ -8087,6 +8166,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
rc);
}
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
/* GRC dump - must be last because when mcp stuck it will
* clutter idle_chk, reg_fifo, ...
*/
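
A minimal sketch of how the qed_dbg_grc_config() entry point added above behaves, following the logic in the hunk: a preset parameter (DBG_GRC_PARAM_EXCLUDE_ALL or DBG_GRC_PARAM_CRASH) fans out to every non-persistent GRC parameter, while a regular parameter only updates its own slot. The wrapper function below is hypothetical and the regular parameter name is used for illustration only; within this patch the function is actually reached through qed_set_grc_config() in qed_main.c.

/* Illustrative sketch only, not part of the patch. */
static enum dbg_status example_grc_config(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	enum dbg_status rc;

	/* Regular parameter: only its own grc.param_val[] slot changes
	 * (parameter name here is illustrative).
	 */
	rc = qed_dbg_grc_config(p_hwfn, p_ptt, DBG_GRC_PARAM_DUMP_REGS, 1);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Preset parameter: every non-persistent parameter is overwritten
	 * with its exclude_all_preset_val. Passing val == 0 is rejected
	 * with DBG_STATUS_INVALID_ARGS; qed_dbg_grc_set_params_default()
	 * is the way to revert a preset.
	 */
	return qed_dbg_grc_config(p_hwfn, p_ptt,
				  DBG_GRC_PARAM_EXCLUDE_ALL, 1);
}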
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e054f6c69e3a..cf3ceb62e397 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -3024,6 +3024,21 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
*/
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param grc_param - GRC parameter
+ * @param val - Value to set.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - grc_param is invalid
+ * - val is outside the allowed boundaries
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_grc_params grc_param, u32 val);
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
@@ -12580,6 +12595,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
+#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
@@ -12748,6 +12765,21 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 9f36e7948222..1a5fc2ae351c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_pstorm_per_queue_stat pstats;
u32 pstats_addr = 0, pstats_len = 0;
@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_ustorm_per_queue_stat ustats;
u32 ustats_addr = 0, ustats_len = 0;
@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_mstorm_per_queue_stat mstats;
u32 mstats_addr = 0, mstats_len = 0;
@@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
{
struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 1efff7f68ef6..ac1511a834d8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -67,6 +67,10 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#define QED_RDMA_SRQS QED_ROCE_QPS
+#define QED_NVM_CFG_SET_FLAGS 0xE
+#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
+#define QED_NVM_CFG_GET_FLAGS 0xA
+#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -1690,6 +1694,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
switch (media_type) {
case MEDIA_DA_TWINAX:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
/* For DAC media multiple speed capabilities are supported*/
@@ -1709,6 +1714,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
*if_capability |= QED_LM_100000baseCR4_Full_BIT;
break;
case MEDIA_BASE_T:
+ *if_capability |= QED_LM_TP_BIT;
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
@@ -1720,6 +1726,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
}
}
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ *if_capability |= QED_LM_FIBRE_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
*if_capability |= QED_LM_1000baseT_Full_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
@@ -1730,6 +1737,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
case MEDIA_SFPP_10G_FIBER:
case MEDIA_XFP_FIBER:
case MEDIA_MODULE_FIBER:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
@@ -1772,6 +1780,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
break;
case MEDIA_KR:
+ *if_capability |= QED_LM_Backplane_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
if (capability &
@@ -1823,7 +1832,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = QED_LM_FIBRE_BIT;
if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
@@ -2229,6 +2237,93 @@ static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
return 0;
}
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x5 [command index] |
+ * 4B | Entity ID | Reserved | Number of config attributes |
+ * 8B | Config ID | Length | Value |
+ * | |
+ * \----------------------------------------------------------------------/
+ * There can be several cfg_id-Length-Value sets as specified by 'Number of...'.
+ * Entity ID - A non-zero entity value for which the config needs to be updated.
+ *
+ * The API parses config attributes from the user-provided buffer and flashes
+ * them to the respective NVM path using the Management FW interface.
+ */
+static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+ u32 flags;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ /* NVM CFG ID attribute header */
+ *data += 4;
+ entity_id = **data;
+ *data += 2;
+ count = *((u16 *)*data);
+ *data += 2;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: entity id %02x num _attrs = %0d\n",
+ entity_id, count);
+ /* NVM CFG ID attributes */
+ for (i = 0; i < count; i++) {
+ cfg_id = *((u16 *)*data);
+ *data += 2;
+ len = **data;
+ (*data)++;
+ memcpy(buf, *data, len);
+ *data += len;
+
+ flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
+ QED_NVM_CFG_SET_FLAGS;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "cfg_id = %d len = %d\n", cfg_id, len);
+ rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
+ buf, len);
+ if (rc) {
+ DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
+ break;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
+ u32 cmd, u32 entity_id)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ u32 flags, len;
+ int rc = 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config cmd = %d entity id %d\n", cmd, entity_id);
+ flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
+ rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
+ if (rc)
+ DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
const struct firmware *image;
@@ -2270,6 +2365,9 @@ static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
rc = qed_nvm_flash_image_access(cdev, &data,
&check_resp);
break;
+ case QED_NVM_FLASH_CMD_NVM_CFG_ID:
+ rc = qed_nvm_flash_cfg_write(cdev, &data);
+ break;
default:
DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
rc = -EINVAL;
@@ -2485,6 +2583,26 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
return rc;
}
+static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
return QED_AFFIN_HWFN_IDX(cdev);
@@ -2538,6 +2656,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
+ .read_nvm_cfg = &qed_nvm_flash_cfg_read,
+ .set_grc_config = &qed_set_grc_config,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
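
Per the parser in qed_nvm_flash_cfg_write() above, each NVM-config command in the flash image is laid out as a 4-byte command index (0x5), a 1-byte entity ID plus one reserved byte, a 16-bit attribute count, and then one (16-bit config ID, 8-bit length, value bytes) triplet per attribute. Below is a sketch of a user-space helper that assembles a single-attribute blob in that layout; the helper name and error handling are hypothetical, only the byte offsets come from the parser, and the full flash file additionally carries the image header validated by qed_nvm_flash_image_validate().

#include <stdint.h>
#include <string.h>

/* Hypothetical helper (illustration only): append one NVM config attribute
 * in the layout parsed by qed_nvm_flash_cfg_write(). Returns bytes written,
 * or 0 if the output buffer is too small. Multi-byte fields are stored in
 * host (little-endian) order, matching the driver's direct u16 reads; this
 * is an assumption about the target host.
 */
static size_t build_nvm_cfg_blob(uint8_t *out, size_t out_len,
				 uint8_t entity_id, uint16_t cfg_id,
				 const uint8_t *val, uint8_t val_len)
{
	size_t need = 4 + 1 + 1 + 2 + 2 + 1 + (size_t)val_len;
	uint16_t count = 1;

	if (out_len < need)
		return 0;

	out[0] = 0x5; out[1] = 0; out[2] = 0; out[3] = 0; /* command index */
	out[4] = entity_id;                               /* entity ID     */
	out[5] = 0;                                       /* reserved      */
	memcpy(&out[6], &count, 2);                       /* num attrs     */
	memcpy(&out[8], &cfg_id, 2);                      /* config ID     */
	out[10] = val_len;                                /* length        */
	memcpy(&out[11], val, val_len);                   /* value         */

	return need;
}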
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 758702c1ce9c..36ddb89856a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3750,3 +3750,64 @@ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len)
+{
+ u32 mb_param = 0, resp, param;
+ int rc;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, p_len, (u32 *)p_buf);
+
+ return rc;
+}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len)
+{
+ u32 mb_param = 0, resp, param;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_ALL)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_COMMIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, len, (u32 *)p_buf);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index e4f8fe4bd062..9c4c2763de8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -251,6 +251,12 @@ union qed_mfw_tlv_data {
struct qed_mfw_tlv_iscsi iscsi;
};
+#define QED_NVM_CFG_OPTION_ALL BIT(0)
+#define QED_NVM_CFG_OPTION_INIT BIT(1)
+#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
+#define QED_NVM_CFG_OPTION_FREE BIT(3)
+#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
+
/**
* @brief - returns the link params of the hw function
*
@@ -1202,4 +1208,33 @@ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Get NVM config attribute value.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param option_id
+ * @param entity_id
+ * @param flags
+ * @param p_buf
+ * @param p_len
+ */
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len);
+
+/**
+ * @brief Set NVM config attribute value.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param option_id
+ * @param entity_id
+ * @param flags
+ * @param p_buf
+ * @param len
+ */
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len);
#endif
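
The QED_NVM_CFG_*_FLAGS constants defined earlier in qed_main.c decompose onto the QED_NVM_CFG_OPTION_* bits above. The standalone sketch below spells out that mapping as compile-time checks; the macro names and asserts are illustrative, only the bit positions and the 0xE/0x1E/0xA/0x1A values come from the patch.

/* Standalone illustration, not part of the patch. */
#define OPTION_ALL        (1U << 0)
#define OPTION_INIT       (1U << 1)
#define OPTION_COMMIT     (1U << 2)
#define OPTION_FREE       (1U << 3)
#define OPTION_ENTITY_SEL (1U << 4)

_Static_assert(0x0E == (OPTION_INIT | OPTION_COMMIT | OPTION_FREE),
	       "QED_NVM_CFG_SET_FLAGS");
_Static_assert(0x1E == (OPTION_INIT | OPTION_COMMIT | OPTION_FREE |
			OPTION_ENTITY_SEL),
	       "QED_NVM_CFG_SET_PF_FLAGS");
_Static_assert(0x0A == (OPTION_INIT | OPTION_FREE),
	       "QED_NVM_CFG_GET_FLAGS");
_Static_assert(0x1A == (OPTION_INIT | OPTION_FREE | OPTION_ENTITY_SEL),
	       "QED_NVM_CFG_GET_PF_FLAGS");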
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 0e931c04fecf..c303a92d5b06 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -177,6 +177,20 @@ enum qede_flags_bit {
QEDE_FLAGS_TX_TIMESTAMPING_EN
};
+#define QEDE_DUMP_MAX_ARGS 4
+enum qede_dump_cmd {
+ QEDE_DUMP_CMD_NONE = 0,
+ QEDE_DUMP_CMD_NVM_CFG,
+ QEDE_DUMP_CMD_GRCDUMP,
+ QEDE_DUMP_CMD_MAX
+};
+
+struct qede_dump_info {
+ enum qede_dump_cmd cmd;
+ u8 num_args;
+ u32 args[QEDE_DUMP_MAX_ARGS];
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -262,6 +276,7 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+ struct qede_dump_info dump_info;
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e85f9fef930c..ec27a43230d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -48,6 +48,9 @@
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
#define QEDE_SELFTEST_POLL_COUNT 100
+#define QEDE_DUMP_VERSION 0x1
+#define QEDE_DUMP_NVM_BUF_LEN 32
+#define QEDE_DUMP_NVM_ARG_COUNT 2
static const struct {
u64 offset;
@@ -424,12 +427,13 @@ struct qede_link_mode_mapping {
};
static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
{QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
- {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+ {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
{QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
{QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
@@ -1972,6 +1976,114 @@ static int qede_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) {
+ if (val->flag > QEDE_DUMP_CMD_MAX) {
+ DP_ERR(edev, "Invalid command %d\n", val->flag);
+ return -EINVAL;
+ }
+ edev->dump_info.cmd = val->flag;
+ edev->dump_info.num_args = 0;
+ return 0;
+ }
+
+ if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) {
+ DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args);
+ return -EINVAL;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ edev->dump_info.args[edev->dump_info.num_args] = val->flag;
+ edev->dump_info.num_args++;
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ rc = edev->ops->common->set_grc_config(edev->cdev,
+ val->flag, 1);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int qede_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ return -EINVAL;
+ }
+
+ dump->version = QEDE_DUMP_VERSION;
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ dump->flag = QEDE_DUMP_CMD_NVM_CFG;
+ dump->len = QEDE_DUMP_NVM_BUF_LEN;
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ dump->flag = QEDE_DUMP_CMD_GRCDUMP;
+ dump->len = edev->ops->common->dbg_all_data_size(edev->cdev);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "dump->version = 0x%x dump->flag = %d dump->len = %d\n",
+ dump->version, dump->flag, dump->len);
+ return 0;
+}
+
+static int qede_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump, void *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
+ edev->dump_info.num_args = 0;
+ return -EINVAL;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) {
+ DP_ERR(edev, "Arg count = %d required = %d\n",
+ edev->dump_info.num_args,
+ QEDE_DUMP_NVM_ARG_COUNT);
+ return -EINVAL;
+ }
+ rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf,
+ edev->dump_info.args[0],
+ edev->dump_info.args[1]);
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ memset(buf, 0, dump->len);
+ rc = edev->ops->common->dbg_all_data(edev->cdev, buf);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
+ edev->dump_info.num_args = 0;
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -2013,6 +2125,9 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
.flash_device = qede_flash_device,
+ .get_dump_flag = qede_get_dump_flag,
+ .get_dump_data = qede_get_dump_data,
+ .set_dump = qede_set_dump,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
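
The three ethtool callbacks above form a small state machine driven from user space: the first set_dump call selects the command, subsequent calls feed arguments (for QEDE_DUMP_CMD_NVM_CFG, exactly two: the NVM config option ID and the entity ID), and get_dump_data performs the read and resets the state back to QEDE_DUMP_CMD_NONE. The sketch below walks that sequence for the NVM-config path; the wrapper function is hypothetical (in practice the callbacks are invoked by the ethtool core), and the two argument values are placeholders.

/* Hypothetical walk-through of the NVM-config dump flow (illustration only;
 * buf must be at least QEDE_DUMP_NVM_BUF_LEN bytes for this path).
 */
static int example_nvm_cfg_dump(struct net_device *dev, void *buf)
{
	struct ethtool_dump dump = { 0 };
	int rc;

	/* 1. Select the command. */
	dump.flag = QEDE_DUMP_CMD_NVM_CFG;
	rc = qede_set_dump(dev, &dump);
	if (rc)
		return rc;

	/* 2. Feed the two required arguments (placeholder values). */
	dump.flag = 1;		/* args[0]: NVM cfg option ID */
	rc = qede_set_dump(dev, &dump);
	if (rc)
		return rc;
	dump.flag = 0;		/* args[1]: entity ID */
	rc = qede_set_dump(dev, &dump);
	if (rc)
		return rc;

	/* 3. Query flag/length, then read the data; this also clears the
	 * stored command and argument count.
	 */
	rc = qede_get_dump_flag(dev, &dump);
	if (rc)
		return rc;
	return qede_get_dump_data(dev, &dump, buf);
}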
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 14f26bf3b388..ac61f614de37 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
struct qlcnic_cmd_buffer *pbuf)
{
struct qlcnic_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile
deleted file mode 100644
index 1dc2568e820c..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Qlogic 10GbE PCI Express ethernet driver
-#
-
-obj-$(CONFIG_QLGE) += qlge.o
-
-qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
deleted file mode 100644
index ad7c5eb8a3b6..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ /dev/null
@@ -1,2353 +0,0 @@
-/*
- * QLogic QLA41xx NIC HBA Driver
- * Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qlge for copyright and licensing details.
- */
-#ifndef _QLGE_H_
-#define _QLGE_H_
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-
-/*
- * General definitions...
- */
-#define DRV_NAME "qlge"
-#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.35"
-
-#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
-
-#define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID_8012 0x8012
-#define QLGE_DEVICE_ID_8000 0x8000
-#define QLGE_MEZZ_SSYS_ID_068 0x0068
-#define QLGE_MEZZ_SSYS_ID_180 0x0180
-#define MAX_CPUS 8
-#define MAX_TX_RINGS MAX_CPUS
-#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
-
-#define NUM_TX_RING_ENTRIES 256
-#define NUM_RX_RING_ENTRIES 256
-
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
-#define DB_PAGE_SIZE 4096
-
-/* Calculate the number of (4k) pages required to
- * contain a buffer queue of the given length.
- */
-#define MAX_DB_PAGES_PER_BQ(x) \
- (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
- (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
-
-#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define LARGE_BUFFER_MAX_SIZE 8192
-#define LARGE_BUFFER_MIN_SIZE 2048
-
-#define MAX_CQ 128
-#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
-#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
-#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
-#define UDELAY_COUNT 3
-#define UDELAY_DELAY 100
-
-
-#define TX_DESC_PER_IOCB 8
-
-#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
-#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
-#else /* all other page sizes */
-#define TX_DESC_PER_OAL 0
-#endif
-
-/* Word shifting for converting 64-bit
- * address to a series of 16-bit words.
- * This is used for some MPI firmware
- * mailbox commands.
- */
-#define LSW(x) ((u16)(x))
-#define MSW(x) ((u16)((u32)(x) >> 16))
-#define LSD(x) ((u32)((u64)(x)))
-#define MSD(x) ((u32)((((u64)(x)) >> 32)))
-
-/* MPI test register definitions. This register
- * is used for determining alternate NIC function's
- * PCI->func number.
- */
-enum {
- MPI_TEST_FUNC_PORT_CFG = 0x1002,
- MPI_TEST_FUNC_PRB_CTL = 0x100e,
- MPI_TEST_FUNC_PRB_EN = 0x18a20000,
- MPI_TEST_FUNC_RST_STS = 0x100a,
- MPI_TEST_FUNC_RST_FRC = 0x00000003,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
- MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
- MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
- MPI_TEST_NIC1_FUNC_SHIFT = 1,
- MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
- MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
- MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
- MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
- MPI_TEST_FC1_FUNCTION_SHIFT = 9,
- MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
- MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
- MPI_TEST_FC2_FUNCTION_SHIFT = 13,
-
- MPI_NIC_READ = 0x00000000,
- MPI_NIC_REG_BLOCK = 0x00020000,
- MPI_NIC_FUNCTION_SHIFT = 6,
-};
-
-/*
- * Processor Address Register (PROC_ADDR) bit definitions.
- */
-enum {
-
- /* Misc. stuff */
- MAILBOX_COUNT = 16,
- MAILBOX_TIMEOUT = 5,
-
- PROC_ADDR_RDY = (1 << 31),
- PROC_ADDR_R = (1 << 30),
- PROC_ADDR_ERR = (1 << 29),
- PROC_ADDR_DA = (1 << 28),
- PROC_ADDR_FUNC0_MBI = 0x00001180,
- PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC0_CTL = 0x000011a1,
- PROC_ADDR_FUNC2_MBI = 0x00001280,
- PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC2_CTL = 0x000012a1,
- PROC_ADDR_MPI_RISC = 0x00000000,
- PROC_ADDR_MDE = 0x00010000,
- PROC_ADDR_REGBLOCK = 0x00020000,
- PROC_ADDR_RISC_REG = 0x00030000,
-};
-
-/*
- * System Register (SYS) bit definitions.
- */
-enum {
- SYS_EFE = (1 << 0),
- SYS_FAE = (1 << 1),
- SYS_MDC = (1 << 2),
- SYS_DST = (1 << 3),
- SYS_DWC = (1 << 4),
- SYS_EVW = (1 << 5),
- SYS_OMP_DLY_MASK = 0x3f000000,
- /*
- * There are no values defined as of edit #15.
- */
- SYS_ODI = (1 << 14),
-};
-
-/*
- * Reset/Failover Register (RST_FO) bit definitions.
- */
-enum {
- RST_FO_TFO = (1 << 0),
- RST_FO_RR_MASK = 0x00060000,
- RST_FO_RR_CQ_CAM = 0x00000000,
- RST_FO_RR_DROP = 0x00000002,
- RST_FO_RR_DQ = 0x00000004,
- RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
- RST_FO_FRB = (1 << 12),
- RST_FO_MOP = (1 << 13),
- RST_FO_REG = (1 << 14),
- RST_FO_FR = (1 << 15),
-};
-
-/*
- * Function Specific Control Register (FSC) bit definitions.
- */
-enum {
- FSC_DBRST_MASK = 0x00070000,
- FSC_DBRST_256 = 0x00000000,
- FSC_DBRST_512 = 0x00000001,
- FSC_DBRST_768 = 0x00000002,
- FSC_DBRST_1024 = 0x00000003,
- FSC_DBL_MASK = 0x00180000,
- FSC_DBL_DBRST = 0x00000000,
- FSC_DBL_MAX_PLD = 0x00000008,
- FSC_DBL_MAX_BRST = 0x00000010,
- FSC_DBL_128_BYTES = 0x00000018,
- FSC_EC = (1 << 5),
- FSC_EPC_MASK = 0x00c00000,
- FSC_EPC_INBOUND = (1 << 6),
- FSC_EPC_OUTBOUND = (1 << 7),
- FSC_VM_PAGESIZE_MASK = 0x07000000,
- FSC_VM_PAGE_2K = 0x00000100,
- FSC_VM_PAGE_4K = 0x00000200,
- FSC_VM_PAGE_8K = 0x00000300,
- FSC_VM_PAGE_64K = 0x00000600,
- FSC_SH = (1 << 11),
- FSC_DSB = (1 << 12),
- FSC_STE = (1 << 13),
- FSC_FE = (1 << 15),
-};
-
-/*
- * Host Command Status Register (CSR) bit definitions.
- */
-enum {
- CSR_ERR_STS_MASK = 0x0000003f,
- /*
- * There are no valued defined as of edit #15.
- */
- CSR_RR = (1 << 8),
- CSR_HRI = (1 << 9),
- CSR_RP = (1 << 10),
- CSR_CMD_PARM_SHIFT = 22,
- CSR_CMD_NOP = 0x00000000,
- CSR_CMD_SET_RST = 0x10000000,
- CSR_CMD_CLR_RST = 0x20000000,
- CSR_CMD_SET_PAUSE = 0x30000000,
- CSR_CMD_CLR_PAUSE = 0x40000000,
- CSR_CMD_SET_H2R_INT = 0x50000000,
- CSR_CMD_CLR_H2R_INT = 0x60000000,
- CSR_CMD_PAR_EN = 0x70000000,
- CSR_CMD_SET_BAD_PAR = 0x80000000,
- CSR_CMD_CLR_BAD_PAR = 0x90000000,
- CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
-};
-
-/*
- * Configuration Register (CFG) bit definitions.
- */
-enum {
- CFG_LRQ = (1 << 0),
- CFG_DRQ = (1 << 1),
- CFG_LR = (1 << 2),
- CFG_DR = (1 << 3),
- CFG_LE = (1 << 5),
- CFG_LCQ = (1 << 6),
- CFG_DCQ = (1 << 7),
- CFG_Q_SHIFT = 8,
- CFG_Q_MASK = 0x7f000000,
-};
-
-/*
- * Status Register (STS) bit definitions.
- */
-enum {
- STS_FE = (1 << 0),
- STS_PI = (1 << 1),
- STS_PL0 = (1 << 2),
- STS_PL1 = (1 << 3),
- STS_PI0 = (1 << 4),
- STS_PI1 = (1 << 5),
- STS_FUNC_ID_MASK = 0x000000c0,
- STS_FUNC_ID_SHIFT = 6,
- STS_F0E = (1 << 8),
- STS_F1E = (1 << 9),
- STS_F2E = (1 << 10),
- STS_F3E = (1 << 11),
- STS_NFE = (1 << 12),
-};
-
-/*
- * Interrupt Enable Register (INTR_EN) bit definitions.
- */
-enum {
- INTR_EN_INTR_MASK = 0x007f0000,
- INTR_EN_TYPE_MASK = 0x03000000,
- INTR_EN_TYPE_ENABLE = 0x00000100,
- INTR_EN_TYPE_DISABLE = 0x00000200,
- INTR_EN_TYPE_READ = 0x00000300,
- INTR_EN_IHD = (1 << 13),
- INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
- INTR_EN_EI = (1 << 14),
- INTR_EN_EN = (1 << 15),
-};
-
-/*
- * Interrupt Mask Register (INTR_MASK) bit definitions.
- */
-enum {
- INTR_MASK_PI = (1 << 0),
- INTR_MASK_HL0 = (1 << 1),
- INTR_MASK_LH0 = (1 << 2),
- INTR_MASK_HL1 = (1 << 3),
- INTR_MASK_LH1 = (1 << 4),
- INTR_MASK_SE = (1 << 5),
- INTR_MASK_LSC = (1 << 6),
- INTR_MASK_MC = (1 << 7),
- INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
-};
-
-/*
- * Register (REV_ID) bit definitions.
- */
-enum {
- REV_ID_MASK = 0x0000000f,
- REV_ID_NICROLL_SHIFT = 0,
- REV_ID_NICREV_SHIFT = 4,
- REV_ID_XGROLL_SHIFT = 8,
- REV_ID_XGREV_SHIFT = 12,
- REV_ID_CHIPREV_SHIFT = 28,
-};
-
-/*
- * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
- */
-enum {
- FRC_ECC_ERR_VW = (1 << 12),
- FRC_ECC_ERR_VB = (1 << 13),
- FRC_ECC_ERR_NI = (1 << 14),
- FRC_ECC_ERR_NO = (1 << 15),
- FRC_ECC_PFE_SHIFT = 16,
- FRC_ECC_ERR_DO = (1 << 18),
- FRC_ECC_P14 = (1 << 19),
-};
-
-/*
- * Error Status Register (ERR_STS) bit definitions.
- */
-enum {
- ERR_STS_NOF = (1 << 0),
- ERR_STS_NIF = (1 << 1),
- ERR_STS_DRP = (1 << 2),
- ERR_STS_XGP = (1 << 3),
- ERR_STS_FOU = (1 << 4),
- ERR_STS_FOC = (1 << 5),
- ERR_STS_FOF = (1 << 6),
- ERR_STS_FIU = (1 << 7),
- ERR_STS_FIC = (1 << 8),
- ERR_STS_FIF = (1 << 9),
- ERR_STS_MOF = (1 << 10),
- ERR_STS_TA = (1 << 11),
- ERR_STS_MA = (1 << 12),
- ERR_STS_MPE = (1 << 13),
- ERR_STS_SCE = (1 << 14),
- ERR_STS_STE = (1 << 15),
- ERR_STS_FOW = (1 << 16),
- ERR_STS_UE = (1 << 17),
- ERR_STS_MCH = (1 << 26),
- ERR_STS_LOC_SHIFT = 27,
-};
-
-/*
- * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
- */
-enum {
- RAM_DBG_ADDR_FW = (1 << 30),
- RAM_DBG_ADDR_FR = (1 << 31),
-};
-
-/*
- * Semaphore Register (SEM) bit definitions.
- */
-enum {
- /*
- * Example:
- * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
- */
- SEM_CLEAR = 0,
- SEM_SET = 1,
- SEM_FORCE = 3,
- SEM_XGMAC0_SHIFT = 0,
- SEM_XGMAC1_SHIFT = 2,
- SEM_ICB_SHIFT = 4,
- SEM_MAC_ADDR_SHIFT = 6,
- SEM_FLASH_SHIFT = 8,
- SEM_PROBE_SHIFT = 10,
- SEM_RT_IDX_SHIFT = 12,
- SEM_PROC_REG_SHIFT = 14,
- SEM_XGMAC0_MASK = 0x00030000,
- SEM_XGMAC1_MASK = 0x000c0000,
- SEM_ICB_MASK = 0x00300000,
- SEM_MAC_ADDR_MASK = 0x00c00000,
- SEM_FLASH_MASK = 0x03000000,
- SEM_PROBE_MASK = 0x0c000000,
- SEM_RT_IDX_MASK = 0x30000000,
- SEM_PROC_REG_MASK = 0xc0000000,
-};
-
-/*
- * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
- */
-enum {
- XGMAC_ADDR_RDY = (1 << 31),
- XGMAC_ADDR_R = (1 << 30),
- XGMAC_ADDR_XME = (1 << 29),
-
- /* XGMAC control registers */
- PAUSE_SRC_LO = 0x00000100,
- PAUSE_SRC_HI = 0x00000104,
- GLOBAL_CFG = 0x00000108,
- GLOBAL_CFG_RESET = (1 << 0),
- GLOBAL_CFG_JUMBO = (1 << 6),
- GLOBAL_CFG_TX_STAT_EN = (1 << 10),
- GLOBAL_CFG_RX_STAT_EN = (1 << 11),
- TX_CFG = 0x0000010c,
- TX_CFG_RESET = (1 << 0),
- TX_CFG_EN = (1 << 1),
- TX_CFG_PREAM = (1 << 2),
- RX_CFG = 0x00000110,
- RX_CFG_RESET = (1 << 0),
- RX_CFG_EN = (1 << 1),
- RX_CFG_PREAM = (1 << 2),
- FLOW_CTL = 0x0000011c,
- PAUSE_OPCODE = 0x00000120,
- PAUSE_TIMER = 0x00000124,
- PAUSE_FRM_DEST_LO = 0x00000128,
- PAUSE_FRM_DEST_HI = 0x0000012c,
- MAC_TX_PARAMS = 0x00000134,
- MAC_TX_PARAMS_JUMBO = (1 << 31),
- MAC_TX_PARAMS_SIZE_SHIFT = 16,
- MAC_RX_PARAMS = 0x00000138,
- MAC_SYS_INT = 0x00000144,
- MAC_SYS_INT_MASK = 0x00000148,
- MAC_MGMT_INT = 0x0000014c,
- MAC_MGMT_IN_MASK = 0x00000150,
- EXT_ARB_MODE = 0x000001fc,
-
- /* XGMAC TX statistics registers */
- TX_PKTS = 0x00000200,
- TX_BYTES = 0x00000208,
- TX_MCAST_PKTS = 0x00000210,
- TX_BCAST_PKTS = 0x00000218,
- TX_UCAST_PKTS = 0x00000220,
- TX_CTL_PKTS = 0x00000228,
- TX_PAUSE_PKTS = 0x00000230,
- TX_64_PKT = 0x00000238,
- TX_65_TO_127_PKT = 0x00000240,
- TX_128_TO_255_PKT = 0x00000248,
- TX_256_511_PKT = 0x00000250,
- TX_512_TO_1023_PKT = 0x00000258,
- TX_1024_TO_1518_PKT = 0x00000260,
- TX_1519_TO_MAX_PKT = 0x00000268,
- TX_UNDERSIZE_PKT = 0x00000270,
- TX_OVERSIZE_PKT = 0x00000278,
-
- /* XGMAC statistics control registers */
- RX_HALF_FULL_DET = 0x000002a0,
- TX_HALF_FULL_DET = 0x000002a4,
- RX_OVERFLOW_DET = 0x000002a8,
- TX_OVERFLOW_DET = 0x000002ac,
- RX_HALF_FULL_MASK = 0x000002b0,
- TX_HALF_FULL_MASK = 0x000002b4,
- RX_OVERFLOW_MASK = 0x000002b8,
- TX_OVERFLOW_MASK = 0x000002bc,
- STAT_CNT_CTL = 0x000002c0,
- STAT_CNT_CTL_CLEAR_TX = (1 << 0),
- STAT_CNT_CTL_CLEAR_RX = (1 << 1),
- AUX_RX_HALF_FULL_DET = 0x000002d0,
- AUX_TX_HALF_FULL_DET = 0x000002d4,
- AUX_RX_OVERFLOW_DET = 0x000002d8,
- AUX_TX_OVERFLOW_DET = 0x000002dc,
- AUX_RX_HALF_FULL_MASK = 0x000002f0,
- AUX_TX_HALF_FULL_MASK = 0x000002f4,
- AUX_RX_OVERFLOW_MASK = 0x000002f8,
- AUX_TX_OVERFLOW_MASK = 0x000002fc,
-
- /* XGMAC RX statistics registers */
- RX_BYTES = 0x00000300,
- RX_BYTES_OK = 0x00000308,
- RX_PKTS = 0x00000310,
- RX_PKTS_OK = 0x00000318,
- RX_BCAST_PKTS = 0x00000320,
- RX_MCAST_PKTS = 0x00000328,
- RX_UCAST_PKTS = 0x00000330,
- RX_UNDERSIZE_PKTS = 0x00000338,
- RX_OVERSIZE_PKTS = 0x00000340,
- RX_JABBER_PKTS = 0x00000348,
- RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
- RX_DROP_EVENTS = 0x00000358,
- RX_FCERR_PKTS = 0x00000360,
- RX_ALIGN_ERR = 0x00000368,
- RX_SYMBOL_ERR = 0x00000370,
- RX_MAC_ERR = 0x00000378,
- RX_CTL_PKTS = 0x00000380,
- RX_PAUSE_PKTS = 0x00000388,
- RX_64_PKTS = 0x00000390,
- RX_65_TO_127_PKTS = 0x00000398,
- RX_128_255_PKTS = 0x000003a0,
- RX_256_511_PKTS = 0x000003a8,
- RX_512_TO_1023_PKTS = 0x000003b0,
- RX_1024_TO_1518_PKTS = 0x000003b8,
- RX_1519_TO_MAX_PKTS = 0x000003c0,
- RX_LEN_ERR_PKTS = 0x000003c8,
-
- /* XGMAC MDIO control registers */
- MDIO_TX_DATA = 0x00000400,
- MDIO_RX_DATA = 0x00000410,
- MDIO_CMD = 0x00000420,
- MDIO_PHY_ADDR = 0x00000430,
- MDIO_PORT = 0x00000440,
- MDIO_STATUS = 0x00000450,
-
- XGMAC_REGISTER_END = 0x00000740,
-};
-
-/*
- * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
- */
-enum {
- ETS_QUEUE_SHIFT = 29,
- ETS_REF = (1 << 26),
- ETS_RS = (1 << 27),
- ETS_P = (1 << 28),
- ETS_FC_COS_SHIFT = 23,
-};
-
-/*
- * Flash Address Register (FLASH_ADDR) bit definitions.
- */
-enum {
- FLASH_ADDR_RDY = (1 << 31),
- FLASH_ADDR_R = (1 << 30),
- FLASH_ADDR_ERR = (1 << 29),
-};
-
-/*
- * Stop CQ Processing Register (CQ_STOP) bit definitions.
- */
-enum {
- CQ_STOP_QUEUE_MASK = (0x007f0000),
- CQ_STOP_TYPE_MASK = (0x03000000),
- CQ_STOP_TYPE_START = 0x00000100,
- CQ_STOP_TYPE_STOP = 0x00000200,
- CQ_STOP_TYPE_READ = 0x00000300,
- CQ_STOP_EN = (1 << 15),
-};
-
-/*
- * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
- */
-enum {
- MAC_ADDR_IDX_SHIFT = 4,
- MAC_ADDR_TYPE_SHIFT = 16,
- MAC_ADDR_TYPE_COUNT = 10,
- MAC_ADDR_TYPE_MASK = 0x000f0000,
- MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
- MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
- MAC_ADDR_TYPE_VLAN = 0x00020000,
- MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
- MAC_ADDR_TYPE_FC_MAC = 0x00040000,
- MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
- MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
- MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
- MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
- MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
- MAC_ADDR_ADR = (1 << 25),
- MAC_ADDR_RS = (1 << 26),
- MAC_ADDR_E = (1 << 27),
- MAC_ADDR_MR = (1 << 30),
- MAC_ADDR_MW = (1 << 31),
- MAX_MULTICAST_ENTRIES = 32,
-
- /* Entry count and words per entry
- * for each address type in the filter.
- */
- MAC_ADDR_MAX_CAM_ENTRIES = 512,
- MAC_ADDR_MAX_CAM_WCOUNT = 3,
- MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
- MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
- MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
- MAC_ADDR_MAX_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
- MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
- MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
- MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
- MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
- MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
-};
-
-/*
- * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
- */
-enum {
- SPLT_HDR_EP = (1 << 31),
-};
-
-/*
- * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
- */
-enum {
- FC_RCV_CFG_ECT = (1 << 15),
- FC_RCV_CFG_DFH = (1 << 20),
- FC_RCV_CFG_DVF = (1 << 21),
- FC_RCV_CFG_RCE = (1 << 27),
- FC_RCV_CFG_RFE = (1 << 28),
- FC_RCV_CFG_TEE = (1 << 29),
- FC_RCV_CFG_TCE = (1 << 30),
- FC_RCV_CFG_TFE = (1 << 31),
-};
-
-/*
- * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
- */
-enum {
- NIC_RCV_CFG_PPE = (1 << 0),
- NIC_RCV_CFG_VLAN_MASK = 0x00060000,
- NIC_RCV_CFG_VLAN_ALL = 0x00000000,
- NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
- NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
- NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
- NIC_RCV_CFG_RV = (1 << 3),
- NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
- NIC_RCV_CFG_DFQ_SHIFT = 8,
- NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
-};
-
-/*
- * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
- */
-enum {
- MGMT_RCV_CFG_ARP = (1 << 0),
- MGMT_RCV_CFG_DHC = (1 << 1),
- MGMT_RCV_CFG_DHS = (1 << 2),
- MGMT_RCV_CFG_NP = (1 << 3),
- MGMT_RCV_CFG_I6N = (1 << 4),
- MGMT_RCV_CFG_I6R = (1 << 5),
- MGMT_RCV_CFG_DH6 = (1 << 6),
- MGMT_RCV_CFG_UD1 = (1 << 7),
- MGMT_RCV_CFG_UD0 = (1 << 8),
- MGMT_RCV_CFG_BCT = (1 << 9),
- MGMT_RCV_CFG_MCT = (1 << 10),
- MGMT_RCV_CFG_DM = (1 << 11),
- MGMT_RCV_CFG_RM = (1 << 12),
- MGMT_RCV_CFG_STL = (1 << 13),
- MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
- MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
- MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
- MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
- MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
-};
-
-/*
- * Routing Index Register (RT_IDX) bit definitions.
- */
-enum {
- RT_IDX_IDX_SHIFT = 8,
- RT_IDX_TYPE_MASK = 0x000f0000,
- RT_IDX_TYPE_SHIFT = 16,
- RT_IDX_TYPE_RT = 0x00000000,
- RT_IDX_TYPE_RT_INV = 0x00010000,
- RT_IDX_TYPE_NICQ = 0x00020000,
- RT_IDX_TYPE_NICQ_INV = 0x00030000,
- RT_IDX_DST_MASK = 0x00700000,
- RT_IDX_DST_RSS = 0x00000000,
- RT_IDX_DST_CAM_Q = 0x00100000,
- RT_IDX_DST_COS_Q = 0x00200000,
- RT_IDX_DST_DFLT_Q = 0x00300000,
- RT_IDX_DST_DEST_Q = 0x00400000,
- RT_IDX_RS = (1 << 26),
- RT_IDX_E = (1 << 27),
- RT_IDX_MR = (1 << 30),
- RT_IDX_MW = (1 << 31),
-
- /* Nic Queue format - type 2 bits */
- RT_IDX_BCAST = (1 << 0),
- RT_IDX_MCAST = (1 << 1),
- RT_IDX_MCAST_MATCH = (1 << 2),
- RT_IDX_MCAST_REG_MATCH = (1 << 3),
- RT_IDX_MCAST_HASH_MATCH = (1 << 4),
- RT_IDX_FC_MACH = (1 << 5),
- RT_IDX_ETH_FCOE = (1 << 6),
- RT_IDX_CAM_HIT = (1 << 7),
- RT_IDX_CAM_BIT0 = (1 << 8),
- RT_IDX_CAM_BIT1 = (1 << 9),
- RT_IDX_VLAN_TAG = (1 << 10),
- RT_IDX_VLAN_MATCH = (1 << 11),
- RT_IDX_VLAN_FILTER = (1 << 12),
- RT_IDX_ETH_SKIP1 = (1 << 13),
- RT_IDX_ETH_SKIP2 = (1 << 14),
- RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
- RT_IDX_802_3 = (1 << 16),
- RT_IDX_LLDP = (1 << 17),
- RT_IDX_UNUSED018 = (1 << 18),
- RT_IDX_UNUSED019 = (1 << 19),
- RT_IDX_UNUSED20 = (1 << 20),
- RT_IDX_UNUSED21 = (1 << 21),
- RT_IDX_ERR = (1 << 22),
- RT_IDX_VALID = (1 << 23),
- RT_IDX_TU_CSUM_ERR = (1 << 24),
- RT_IDX_IP_CSUM_ERR = (1 << 25),
- RT_IDX_MAC_ERR = (1 << 26),
- RT_IDX_RSS_TCP6 = (1 << 27),
- RT_IDX_RSS_TCP4 = (1 << 28),
- RT_IDX_RSS_IPV6 = (1 << 29),
- RT_IDX_RSS_IPV4 = (1 << 30),
- RT_IDX_RSS_MATCH = (1 << 31),
-
- /* Hierarchy for the NIC Queue Mask */
- RT_IDX_ALL_ERR_SLOT = 0,
- RT_IDX_MAC_ERR_SLOT = 0,
- RT_IDX_IP_CSUM_ERR_SLOT = 1,
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
- RT_IDX_BCAST_SLOT = 3,
- RT_IDX_MCAST_MATCH_SLOT = 4,
- RT_IDX_ALLMULTI_SLOT = 5,
- RT_IDX_UNUSED6_SLOT = 6,
- RT_IDX_UNUSED7_SLOT = 7,
- RT_IDX_RSS_MATCH_SLOT = 8,
- RT_IDX_RSS_IPV4_SLOT = 8,
- RT_IDX_RSS_IPV6_SLOT = 9,
- RT_IDX_RSS_TCP4_SLOT = 10,
- RT_IDX_RSS_TCP6_SLOT = 11,
- RT_IDX_CAM_HIT_SLOT = 12,
- RT_IDX_UNUSED013 = 13,
- RT_IDX_UNUSED014 = 14,
- RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_RT_SLOTS = 8,
- RT_IDX_MAX_NIC_SLOTS = 16,
-};
-
-/*
- * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
- */
-enum {
- XG_SERDES_ADDR_RDY = (1 << 31),
- XG_SERDES_ADDR_R = (1 << 30),
-
- XG_SERDES_ADDR_STS = 0x00001E06,
- XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
- XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
- XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
-
- /* Serdes coredump definitions. */
- XG_SERDES_XAUI_AN_START = 0x00000000,
- XG_SERDES_XAUI_AN_END = 0x00000034,
- XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
- XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
- XG_SERDES_XFI_AN_START = 0x00001000,
- XG_SERDES_XFI_AN_END = 0x00001034,
- XG_SERDES_XFI_TRAIN_START = 0x10001050,
- XG_SERDES_XFI_TRAIN_END = 0x1000107C,
- XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
- XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
- XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
- XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
- XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
- XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
- XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
- XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
-};
-
-/*
- * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
- */
-enum {
- PRB_MX_ADDR_ARE = (1 << 16),
- PRB_MX_ADDR_UP = (1 << 15),
- PRB_MX_ADDR_SWP = (1 << 14),
-
- /* Module select values. */
- PRB_MX_ADDR_MAX_MODS = 21,
- PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
- PRB_MX_ADDR_MOD_SEL_TBD = 0,
- PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
- PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
- PRB_MX_ADDR_MOD_SEL_FRB = 3,
- PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
- PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
- PRB_MX_ADDR_MOD_SEL_DA1 = 6,
- PRB_MX_ADDR_MOD_SEL_DA2 = 7,
- PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
- PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
- PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
- PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
- PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
- PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
- PRB_MX_ADDR_MOD_SEL_REG = 14,
- PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
- PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
- PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
- PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
- PRB_MX_ADDR_MOD_SEL_MOP = 20,
- /* Bit fields indicating which modules
- * are valid for each clock domain.
- */
- PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
- PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
- PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
- PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
- PRB_MX_ADDR_VALID_TOTAL = 34,
-
- /* Clock domain values. */
- PRB_MX_ADDR_CLOCK_SHIFT = 6,
- PRB_MX_ADDR_SYS_CLOCK = 0,
- PRB_MX_ADDR_PCI_CLOCK = 2,
- PRB_MX_ADDR_FC_CLOCK = 5,
- PRB_MX_ADDR_XGM_CLOCK = 6,
-
- PRB_MX_ADDR_MAX_MUX = 64,
-};
-
-/*
- * Control Register Set Map
- */
-enum {
- PROC_ADDR = 0, /* Use semaphore */
- PROC_DATA = 0x04, /* Use semaphore */
- SYS = 0x08,
- RST_FO = 0x0c,
- FSC = 0x10,
- CSR = 0x14,
- LED = 0x18,
- ICB_RID = 0x1c, /* Use semaphore */
- ICB_L = 0x20, /* Use semaphore */
- ICB_H = 0x24, /* Use semaphore */
- CFG = 0x28,
- BIOS_ADDR = 0x2c,
- STS = 0x30,
- INTR_EN = 0x34,
- INTR_MASK = 0x38,
- ISR1 = 0x3c,
- ISR2 = 0x40,
- ISR3 = 0x44,
- ISR4 = 0x48,
- REV_ID = 0x4c,
- FRC_ECC_ERR = 0x50,
- ERR_STS = 0x54,
- RAM_DBG_ADDR = 0x58,
- RAM_DBG_DATA = 0x5c,
- ECC_ERR_CNT = 0x60,
- SEM = 0x64,
- GPIO_1 = 0x68, /* Use semaphore */
- GPIO_2 = 0x6c, /* Use semaphore */
- GPIO_3 = 0x70, /* Use semaphore */
- RSVD2 = 0x74,
- XGMAC_ADDR = 0x78, /* Use semaphore */
- XGMAC_DATA = 0x7c, /* Use semaphore */
- NIC_ETS = 0x80,
- CNA_ETS = 0x84,
- FLASH_ADDR = 0x88, /* Use semaphore */
- FLASH_DATA = 0x8c, /* Use semaphore */
- CQ_STOP = 0x90,
- PAGE_TBL_RID = 0x94,
- WQ_PAGE_TBL_LO = 0x98,
- WQ_PAGE_TBL_HI = 0x9c,
- CQ_PAGE_TBL_LO = 0xa0,
- CQ_PAGE_TBL_HI = 0xa4,
- MAC_ADDR_IDX = 0xa8, /* Use semaphore */
- MAC_ADDR_DATA = 0xac, /* Use semaphore */
- COS_DFLT_CQ1 = 0xb0,
- COS_DFLT_CQ2 = 0xb4,
- ETYPE_SKIP1 = 0xb8,
- ETYPE_SKIP2 = 0xbc,
- SPLT_HDR = 0xc0,
- FC_PAUSE_THRES = 0xc4,
- NIC_PAUSE_THRES = 0xc8,
- FC_ETHERTYPE = 0xcc,
- FC_RCV_CFG = 0xd0,
- NIC_RCV_CFG = 0xd4,
- FC_COS_TAGS = 0xd8,
- NIC_COS_TAGS = 0xdc,
- MGMT_RCV_CFG = 0xe0,
- RT_IDX = 0xe4,
- RT_DATA = 0xe8,
- RSVD7 = 0xec,
- XG_SERDES_ADDR = 0xf0,
- XG_SERDES_DATA = 0xf4,
- PRB_MX_ADDR = 0xf8, /* Use semaphore */
- PRB_MX_DATA = 0xfc, /* Use semaphore */
-};
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define SMALL_BUFFER_SIZE 256
-#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
-#define SPLT_SETTING FSC_DBRST_1024
-#define SPLT_LEN 0
-#define QLGE_SB_PAD 0
-#else
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define SPLT_SETTING FSC_SH
-#define SPLT_LEN (SPLT_HDR_EP | \
- min(SMALL_BUF_MAP_SIZE, 1023))
-#define QLGE_SB_PAD 32
-#endif
-
-/*
- * CAM output format.
- */
-enum {
- CAM_OUT_ROUTE_FC = 0,
- CAM_OUT_ROUTE_NIC = 1,
- CAM_OUT_FUNC_SHIFT = 2,
- CAM_OUT_RV = (1 << 4),
- CAM_OUT_SH = (1 << 15),
- CAM_OUT_CQ_ID_SHIFT = 5,
-};
-
-/*
- * Mailbox definitions
- */
-enum {
- /* Asynchronous Event Notifications */
- AEN_SYS_ERR = 0x00008002,
- AEN_LINK_UP = 0x00008011,
- AEN_LINK_DOWN = 0x00008012,
- AEN_IDC_CMPLT = 0x00008100,
- AEN_IDC_REQ = 0x00008101,
- AEN_IDC_EXT = 0x00008102,
- AEN_DCBX_CHG = 0x00008110,
- AEN_AEN_LOST = 0x00008120,
- AEN_AEN_SFP_IN = 0x00008130,
- AEN_AEN_SFP_OUT = 0x00008131,
- AEN_FW_INIT_DONE = 0x00008400,
- AEN_FW_INIT_FAIL = 0x00008401,
-
- /* Mailbox Command Opcodes. */
- MB_CMD_NOP = 0x00000000,
- MB_CMD_EX_FW = 0x00000002,
- MB_CMD_MB_TEST = 0x00000006,
- MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
- MB_CMD_ABOUT_FW = 0x00000008,
- MB_CMD_COPY_RISC_RAM = 0x0000000a,
- MB_CMD_LOAD_RISC_RAM = 0x0000000b,
- MB_CMD_DUMP_RISC_RAM = 0x0000000c,
- MB_CMD_WRITE_RAM = 0x0000000d,
- MB_CMD_INIT_RISC_RAM = 0x0000000e,
- MB_CMD_READ_RAM = 0x0000000f,
- MB_CMD_STOP_FW = 0x00000014,
- MB_CMD_MAKE_SYS_ERR = 0x0000002a,
- MB_CMD_WRITE_SFP = 0x00000030,
- MB_CMD_READ_SFP = 0x00000031,
- MB_CMD_INIT_FW = 0x00000060,
- MB_CMD_GET_IFCB = 0x00000061,
- MB_CMD_GET_FW_STATE = 0x00000069,
- MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
- MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
- MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
- MB_WOL_DISABLE = 0,
- MB_WOL_MAGIC_PKT = (1 << 1),
- MB_WOL_FLTR = (1 << 2),
- MB_WOL_UCAST = (1 << 3),
- MB_WOL_MCAST = (1 << 4),
- MB_WOL_BCAST = (1 << 5),
- MB_WOL_LINK_UP = (1 << 6),
- MB_WOL_LINK_DOWN = (1 << 7),
- MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
- MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
- MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
- MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
- MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
- MB_CMD_SET_WOL_IMMED = 0x00000115,
- MB_CMD_PORT_RESET = 0x00000120,
- MB_CMD_SET_PORT_CFG = 0x00000122,
- MB_CMD_GET_PORT_CFG = 0x00000123,
- MB_CMD_GET_LINK_STS = 0x00000124,
- MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
- QL_LED_BLINK = 0x03e803e8,
- MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
- MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
- MB_SET_MPI_TFK_STOP = (1 << 0),
- MB_SET_MPI_TFK_RESUME = (1 << 1),
- MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
- MB_GET_MPI_TFK_STOPPED = (1 << 0),
- MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
- /* Sub-commands for IDC request.
- * This describes the reason for the
- * IDC request.
- */
- MB_CMD_IOP_NONE = 0x0000,
- MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
- MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
- MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
- MB_CMD_IOP_DVR_START = 0x0100,
- MB_CMD_IOP_FLASH_ACC = 0x0101,
- MB_CMD_IOP_RESTART_MPI = 0x0102,
- MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
-
- /* Mailbox Command Status. */
- MB_CMD_STS_GOOD = 0x00004000, /* Success. */
- MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
- MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
- MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
- MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
- MB_CMD_STS_ERR = 0x00004005, /* System Error. */
- MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
-};
-
-struct mbox_params {
- u32 mbox_in[MAILBOX_COUNT];
- u32 mbox_out[MAILBOX_COUNT];
- int in_count;
- int out_count;
-};
-
-struct flash_params_8012 {
- u8 dev_id_str[4];
- __le16 size;
- __le16 csum;
- __le16 ver;
- __le16 sub_dev_id;
- u8 mac_addr[6];
- __le16 res;
-};
-
-/* 8000 device's flash is a different structure
- * at a different offset in flash.
- */
-#define FUNC0_FLASH_OFFSET 0x140200
-#define FUNC1_FLASH_OFFSET 0x140600
-
-/* Flash related data structures. */
-struct flash_params_8000 {
- u8 dev_id_str[4]; /* "8000" */
- __le16 ver;
- __le16 size;
- __le16 csum;
- __le16 reserved0;
- __le16 total_size;
- __le16 entry_count;
- u8 data_type0;
- u8 data_size0;
- u8 mac_addr[6];
- u8 data_type1;
- u8 data_size1;
- u8 mac_addr1[6];
- u8 data_type2;
- u8 data_size2;
- __le16 vlan_id;
- u8 data_type3;
- u8 data_size3;
- __le16 last;
- u8 reserved1[464];
- __le16 subsys_ven_id;
- __le16 subsys_dev_id;
- u8 reserved2[4];
-};
-
-union flash_params {
- struct flash_params_8012 flash_params_8012;
- struct flash_params_8000 flash_params_8000;
-};
-
-/*
- * doorbell space for the rx ring context
- */
-struct rx_doorbell_context {
- u32 cnsmr_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/*
- * doorbell space for the tx ring context
- */
-struct tx_doorbell_context {
- u32 prod_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/* DATA STRUCTURES SHARED WITH HARDWARE. */
-struct tx_buf_desc {
- __le64 addr;
- __le32 len;
-#define TX_DESC_LEN_MASK 0x000fffff
-#define TX_DESC_C 0x40000000
-#define TX_DESC_E 0x80000000
-} __packed;
-
-/*
- * IOCB Definitions...
- */
-
-#define OPCODE_OB_MAC_IOCB 0x01
-#define OPCODE_OB_MAC_TSO_IOCB 0x02
-#define OPCODE_IB_MAC_IOCB 0x20
-#define OPCODE_IB_MPI_IOCB 0x21
-#define OPCODE_IB_AE_IOCB 0x3f
-
-struct ob_mac_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_IOCB_REQ_OI 0x01
-#define OB_MAC_IOCB_REQ_I 0x02
-#define OB_MAC_IOCB_REQ_D 0x08
-#define OB_MAC_IOCB_REQ_F 0x10
- u8 flags2;
- u8 flags3;
-#define OB_MAC_IOCB_DFP 0x02
-#define OB_MAC_IOCB_V 0x04
- __le32 reserved1[2];
- __le16 frame_len;
-#define OB_MAC_IOCB_LEN_MASK 0x3ffff
- __le16 reserved2;
- u32 tid;
- u32 txq_idx;
- __le32 reserved3;
- __le16 vlan_tci;
- __le16 reserved4;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct ob_mac_iocb_rsp {
- u8 opcode; /* */
- u8 flags1; /* */
-#define OB_MAC_IOCB_RSP_OI 0x01 /* */
-#define OB_MAC_IOCB_RSP_I 0x02 /* */
-#define OB_MAC_IOCB_RSP_E 0x08 /* */
-#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
-#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
-#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_IOCB_RSP_B 0x80 /* */
- u32 tid;
- u32 txq_idx;
- __le32 reserved[13];
-} __packed;
-
-struct ob_mac_tso_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_OI 0x01
-#define OB_MAC_TSO_IOCB_I 0x02
-#define OB_MAC_TSO_IOCB_D 0x08
-#define OB_MAC_TSO_IOCB_IP4 0x40
-#define OB_MAC_TSO_IOCB_IP6 0x80
- u8 flags2;
-#define OB_MAC_TSO_IOCB_LSO 0x20
-#define OB_MAC_TSO_IOCB_UC 0x40
-#define OB_MAC_TSO_IOCB_TC 0x80
- u8 flags3;
-#define OB_MAC_TSO_IOCB_IC 0x01
-#define OB_MAC_TSO_IOCB_DFP 0x02
-#define OB_MAC_TSO_IOCB_V 0x04
- __le32 reserved1[2];
- __le32 frame_len;
- u32 tid;
- u32 txq_idx;
- __le16 total_hdrs_len;
- __le16 net_trans_offset;
-#define OB_MAC_TRANSPORT_HDR_SHIFT 6
- __le16 vlan_tci;
- __le16 mss;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct ob_mac_tso_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_RSP_OI 0x01
-#define OB_MAC_TSO_IOCB_RSP_I 0x02
-#define OB_MAC_TSO_IOCB_RSP_E 0x08
-#define OB_MAC_TSO_IOCB_RSP_S 0x10
-#define OB_MAC_TSO_IOCB_RSP_L 0x20
-#define OB_MAC_TSO_IOCB_RSP_P 0x40
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_TSO_IOCB_RSP_B 0x8000
- u32 tid;
- u32 txq_idx;
- __le32 reserved2[13];
-} __packed;
-
-struct ib_mac_iocb_rsp {
- u8 opcode; /* 0x20 */
- u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
-#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
-#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
-#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
-#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
-#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
-#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
-#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
-#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
-#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
- u8 flags2;
-#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
-#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
-#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
-#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
-#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
-#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
-#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
-#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
-#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
-#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
-#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
-#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
- u8 flags3;
-#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
-#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
-#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
-#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
-#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
-#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
-#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
-#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
- __le32 data_len; /* */
- __le64 data_addr; /* */
- __le32 rss; /* */
- __le16 vlan_id; /* 12 bits */
-#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
-#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
-#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
-
- __le16 reserved1;
- __le32 reserved2[6];
- u8 reserved3[3];
- u8 flags4;
-#define IB_MAC_IOCB_RSP_HV 0x20
-#define IB_MAC_IOCB_RSP_HS 0x40
-#define IB_MAC_IOCB_RSP_HL 0x80
- __le32 hdr_len; /* */
- __le64 hdr_addr; /* */
-} __packed;
-
-struct ib_ae_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define IB_AE_IOCB_RSP_OI 0x01
-#define IB_AE_IOCB_RSP_I 0x02
- u8 event;
-#define LINK_UP_EVENT 0x00
-#define LINK_DOWN_EVENT 0x01
-#define CAM_LOOKUP_ERR_EVENT 0x06
-#define SOFT_ECC_ERROR_EVENT 0x07
-#define MGMT_ERR_EVENT 0x08
-#define TEN_GIG_MAC_EVENT 0x09
-#define GPI0_H2L_EVENT 0x10
-#define GPI0_L2H_EVENT 0x20
-#define GPI1_H2L_EVENT 0x11
-#define GPI1_L2H_EVENT 0x21
-#define PCI_ERR_ANON_BUF_RD 0x40
- u8 q_id;
- __le32 reserved[15];
-} __packed;
-
-/*
- * These three structures are for generic
- * handling of ib and ob iocbs.
- */
-struct ql_net_rsp_iocb {
- u8 opcode;
- u8 flags0;
- __le16 length;
- __le32 tid;
- __le32 reserved[14];
-} __packed;
-
-struct net_req_iocb {
- u8 opcode;
- u8 flags0;
- __le16 flags1;
- __le32 tid;
- __le32 reserved1[30];
-} __packed;
-
-/*
- * tx ring initialization control block for chip.
- * It is defined as:
- * "Work Queue Initialization Control Block"
- */
-struct wqicb {
- __le16 len;
-#define Q_LEN_V (1 << 4)
-#define Q_LEN_CPP_CONT 0x0000
-#define Q_LEN_CPP_16 0x0001
-#define Q_LEN_CPP_32 0x0002
-#define Q_LEN_CPP_64 0x0003
-#define Q_LEN_CPP_512 0x0006
- __le16 flags;
-#define Q_PRI_SHIFT 1
-#define Q_FLAGS_LC 0x1000
-#define Q_FLAGS_LB 0x2000
-#define Q_FLAGS_LI 0x4000
-#define Q_FLAGS_LO 0x8000
- __le16 cq_id_rss;
-#define Q_CQ_ID_RSS_RV 0x8000
- __le16 rid;
- __le64 addr;
- __le64 cnsmr_idx_addr;
-} __packed;
-
-/*
- * rx ring initialization control block for chip.
- * It is defined as:
- * "Completion Queue Initialization Control Block"
- */
-struct cqicb {
- u8 msix_vect;
- u8 reserved1;
- u8 reserved2;
- u8 flags;
-#define FLAGS_LV 0x08
-#define FLAGS_LS 0x10
-#define FLAGS_LL 0x20
-#define FLAGS_LI 0x40
-#define FLAGS_LC 0x80
- __le16 len;
-#define LEN_V (1 << 4)
-#define LEN_CPP_CONT 0x0000
-#define LEN_CPP_32 0x0001
-#define LEN_CPP_64 0x0002
-#define LEN_CPP_128 0x0003
- __le16 rid;
- __le64 addr;
- __le64 prod_idx_addr;
- __le16 pkt_delay;
- __le16 irq_delay;
- __le64 lbq_addr;
- __le16 lbq_buf_size;
- __le16 lbq_len; /* entry count */
- __le64 sbq_addr;
- __le16 sbq_buf_size;
- __le16 sbq_len; /* entry count */
-} __packed;
-
-struct ricb {
- u8 base_cq;
-#define RSS_L4K 0x80
- u8 flags;
-#define RSS_L6K 0x01
-#define RSS_LI 0x02
-#define RSS_LB 0x04
-#define RSS_LM 0x08
-#define RSS_RI4 0x10
-#define RSS_RT4 0x20
-#define RSS_RI6 0x40
-#define RSS_RT6 0x80
- __le16 mask;
- u8 hash_cq_id[1024];
- __le32 ipv6_hash_key[10];
- __le32 ipv4_hash_key[4];
-} __packed;
-
-/* SOFTWARE/DRIVER DATA STRUCTURES. */
-
-struct oal {
- struct tx_buf_desc oal[TX_DESC_PER_OAL];
-};
-
-struct map_list {
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
-struct tx_ring_desc {
- struct sk_buff *skb;
- struct ob_mac_iocb_req *queue_entry;
- u32 index;
- struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 2];
- int map_cnt;
- struct tx_ring_desc *next;
-};
-
-struct page_chunk {
- struct page *page; /* master page */
- char *va; /* virt addr for this chunk */
- u64 map; /* mapping for master */
- unsigned int offset; /* offset for this chunk */
- unsigned int last_flag; /* flag set for last chunk in page */
-};
-
-struct bq_desc {
- union {
- struct page_chunk pg_chunk;
- struct sk_buff *skb;
- } p;
- __le64 *addr;
- u32 index;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
-#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
-
-struct tx_ring {
- /*
- * queue info.
- */
- struct wqicb wqicb; /* structure used to inform chip of new queue */
- void *wq_base; /* pci_alloc:virtual addr for tx */
- dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
- __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
- dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
- u32 wq_size; /* size in bytes of queue area */
- u32 wq_len; /* number of entries in queue */
- void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
- void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
- u16 prod_idx; /* current value for prod idx */
- u16 cq_id; /* completion (rx) queue for tx completions */
- u8 wq_id; /* queue id for this entry */
- u8 reserved1[3];
- struct tx_ring_desc *q; /* descriptor list for the queue */
- spinlock_t lock;
- atomic_t tx_count; /* counts down for every outstanding IO */
- struct delayed_work tx_work;
- struct ql_adapter *qdev;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_errors;
-};
-
-/*
- * Type of inbound queue.
- */
-enum {
- DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
- TX_Q = 3, /* Handles outbound completions. */
- RX_Q = 4, /* Handles inbound completions. */
-};
-
-struct rx_ring {
- struct cqicb cqicb; /* The chip's completion queue init control block. */
-
- /* Completion queue elements. */
- void *cq_base;
- dma_addr_t cq_base_dma;
- u32 cq_size;
- u32 cq_len;
- u16 cq_id;
- __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
- dma_addr_t prod_idx_sh_reg_dma;
- void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
- u32 cnsmr_idx; /* current sw idx */
- struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
- void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
-
- /* Large buffer queue elements. */
- u32 lbq_len; /* entry count */
- u32 lbq_size; /* size in bytes of queue */
- u32 lbq_buf_size;
- void *lbq_base;
- dma_addr_t lbq_base_dma;
- void *lbq_base_indirect;
- dma_addr_t lbq_base_indirect_dma;
- struct page_chunk pg_chunk; /* current page for chunks */
- struct bq_desc *lbq; /* array of control blocks */
- void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
- u32 lbq_prod_idx; /* current sw prod idx */
- u32 lbq_curr_idx; /* next entry we expect */
- u32 lbq_clean_idx; /* beginning of new descs */
- u32 lbq_free_cnt; /* free buffer desc cnt */
-
- /* Small buffer queue elements. */
- u32 sbq_len; /* entry count */
- u32 sbq_size; /* size in bytes of queue */
- u32 sbq_buf_size;
- void *sbq_base;
- dma_addr_t sbq_base_dma;
- void *sbq_base_indirect;
- dma_addr_t sbq_base_indirect_dma;
- struct bq_desc *sbq; /* array of control blocks */
- void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
- u32 sbq_prod_idx; /* current sw prod idx */
- u32 sbq_curr_idx; /* next entry we expect */
- u32 sbq_clean_idx; /* beginning of new descs */
- u32 sbq_free_cnt; /* free buffer desc cnt */
-
- /* Misc. handler elements. */
- u32 type; /* Type of queue, tx, rx. */
- u32 irq; /* Which vector this ring is assigned. */
- u32 cpu; /* Which CPU this should run on. */
- char name[IFNAMSIZ + 5];
- struct napi_struct napi;
- u8 reserved;
- struct ql_adapter *qdev;
- u64 rx_packets;
- u64 rx_multicast;
- u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
-};
-
-/*
- * RSS Initialization Control Block
- */
-struct hash_id {
- u8 value[4];
-};
-
-struct nic_stats {
- /*
- * These stats come from offset 200h to 278h
- * in the XGMAC register.
- */
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_mcast_pkts;
- u64 tx_bcast_pkts;
- u64 tx_ucast_pkts;
- u64 tx_ctl_pkts;
- u64 tx_pause_pkts;
- u64 tx_64_pkt;
- u64 tx_65_to_127_pkt;
- u64 tx_128_to_255_pkt;
- u64 tx_256_511_pkt;
- u64 tx_512_to_1023_pkt;
- u64 tx_1024_to_1518_pkt;
- u64 tx_1519_to_max_pkt;
- u64 tx_undersize_pkt;
- u64 tx_oversize_pkt;
-
- /*
- * These stats come from offset 300h to 3C8h
- * in the XGMAC register.
- */
- u64 rx_bytes;
- u64 rx_bytes_ok;
- u64 rx_pkts;
- u64 rx_pkts_ok;
- u64 rx_bcast_pkts;
- u64 rx_mcast_pkts;
- u64 rx_ucast_pkts;
- u64 rx_undersize_pkts;
- u64 rx_oversize_pkts;
- u64 rx_jabber_pkts;
- u64 rx_undersize_fcerr_pkts;
- u64 rx_drop_events;
- u64 rx_fcerr_pkts;
- u64 rx_align_err;
- u64 rx_symbol_err;
- u64 rx_mac_err;
- u64 rx_ctl_pkts;
- u64 rx_pause_pkts;
- u64 rx_64_pkts;
- u64 rx_65_to_127_pkts;
- u64 rx_128_255_pkts;
- u64 rx_256_511_pkts;
- u64 rx_512_to_1023_pkts;
- u64 rx_1024_to_1518_pkts;
- u64 rx_1519_to_max_pkts;
- u64 rx_len_err_pkts;
- /* Receive Mac Err stats */
- u64 rx_code_err;
- u64 rx_oversize_err;
- u64 rx_undersize_err;
- u64 rx_preamble_err;
- u64 rx_frame_len_err;
- u64 rx_crc_err;
- u64 rx_err_count;
- /*
- * These stats come from offset 500h to 5C8h
- * in the XGMAC register.
- */
- u64 tx_cbfc_pause_frames0;
- u64 tx_cbfc_pause_frames1;
- u64 tx_cbfc_pause_frames2;
- u64 tx_cbfc_pause_frames3;
- u64 tx_cbfc_pause_frames4;
- u64 tx_cbfc_pause_frames5;
- u64 tx_cbfc_pause_frames6;
- u64 tx_cbfc_pause_frames7;
- u64 rx_cbfc_pause_frames0;
- u64 rx_cbfc_pause_frames1;
- u64 rx_cbfc_pause_frames2;
- u64 rx_cbfc_pause_frames3;
- u64 rx_cbfc_pause_frames4;
- u64 rx_cbfc_pause_frames5;
- u64 rx_cbfc_pause_frames6;
- u64 rx_cbfc_pause_frames7;
- u64 rx_nic_fifo_drop;
-};
-
-/* Firmware coredump internal register address/length pairs. */
-enum {
- MPI_CORE_REGS_ADDR = 0x00030000,
- MPI_CORE_REGS_CNT = 127,
- MPI_CORE_SH_REGS_CNT = 16,
- TEST_REGS_ADDR = 0x00001000,
- TEST_REGS_CNT = 23,
- RMII_REGS_ADDR = 0x00001040,
- RMII_REGS_CNT = 64,
- FCMAC1_REGS_ADDR = 0x00001080,
- FCMAC2_REGS_ADDR = 0x000010c0,
- FCMAC_REGS_CNT = 64,
- FC1_MBX_REGS_ADDR = 0x00001100,
- FC2_MBX_REGS_ADDR = 0x00001240,
- FC_MBX_REGS_CNT = 64,
- IDE_REGS_ADDR = 0x00001140,
- IDE_REGS_CNT = 64,
- NIC1_MBX_REGS_ADDR = 0x00001180,
- NIC2_MBX_REGS_ADDR = 0x00001280,
- NIC_MBX_REGS_CNT = 64,
- SMBUS_REGS_ADDR = 0x00001200,
- SMBUS_REGS_CNT = 64,
- I2C_REGS_ADDR = 0x00001fc0,
- I2C_REGS_CNT = 64,
- MEMC_REGS_ADDR = 0x00003000,
- MEMC_REGS_CNT = 256,
- PBUS_REGS_ADDR = 0x00007c00,
- PBUS_REGS_CNT = 256,
- MDE_REGS_ADDR = 0x00010000,
- MDE_REGS_CNT = 6,
- CODE_RAM_ADDR = 0x00020000,
- CODE_RAM_CNT = 0x2000,
- MEMC_RAM_ADDR = 0x00100000,
- MEMC_RAM_CNT = 0x2000,
-};
-
-#define MPI_COREDUMP_COOKIE 0x5555aaaa
-struct mpi_coredump_global_header {
- u32 cookie;
- u8 idString[16];
- u32 timeLo;
- u32 timeHi;
- u32 imageSize;
- u32 headerSize;
- u8 info[220];
-};
-
-struct mpi_coredump_segment_header {
- u32 cookie;
- u32 segNum;
- u32 segSize;
- u32 extra;
- u8 description[16];
-};
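
The two headers above are enough to walk a finished dump without knowing the individual segment layouts. Below is a minimal user-space sketch, assuming the buffer is laid out as described here (global header followed by back-to-back segments) and that segSize covers the segment header plus its payload, which is how the headers are filled in by ql_build_coredump_seg_header() in qlge_dbg.c. walk_segments() and seg_hdr are hypothetical names introduced for illustration, not part of the driver.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct seg_hdr {			/* mirrors mpi_coredump_segment_header */
	uint32_t cookie;
	uint32_t seg_num;
	uint32_t seg_size;		/* header plus payload, in bytes */
	uint32_t extra;
	uint8_t description[16];
};

static void walk_segments(const uint8_t *buf, size_t len, size_t global_hdr_len)
{
	size_t off = global_hdr_len;	/* segments start after the global header */

	while (off + sizeof(struct seg_hdr) <= len) {
		struct seg_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.cookie != 0x5555aaaa || hdr.seg_size < sizeof(hdr))
			break;		/* not a valid segment header */
		printf("segment %u: %.16s (%u bytes)\n",
		       (unsigned int)hdr.seg_num,
		       (const char *)hdr.description,
		       (unsigned int)hdr.seg_size);
		off += hdr.seg_size;	/* jump to the next segment header */
	}
}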
-
-/* Firmware coredump header segment numbers. */
-enum {
- CORE_SEG_NUM = 1,
- TEST_LOGIC_SEG_NUM = 2,
- RMII_SEG_NUM = 3,
- FCMAC1_SEG_NUM = 4,
- FCMAC2_SEG_NUM = 5,
- FC1_MBOX_SEG_NUM = 6,
- IDE_SEG_NUM = 7,
- NIC1_MBOX_SEG_NUM = 8,
- SMBUS_SEG_NUM = 9,
- FC2_MBOX_SEG_NUM = 10,
- NIC2_MBOX_SEG_NUM = 11,
- I2C_SEG_NUM = 12,
- MEMC_SEG_NUM = 13,
- PBUS_SEG_NUM = 14,
- MDE_SEG_NUM = 15,
- NIC1_CONTROL_SEG_NUM = 16,
- NIC2_CONTROL_SEG_NUM = 17,
- NIC1_XGMAC_SEG_NUM = 18,
- NIC2_XGMAC_SEG_NUM = 19,
- WCS_RAM_SEG_NUM = 20,
- MEMC_RAM_SEG_NUM = 21,
- XAUI_AN_SEG_NUM = 22,
- XAUI_HSS_PCS_SEG_NUM = 23,
- XFI_AN_SEG_NUM = 24,
- XFI_TRAIN_SEG_NUM = 25,
- XFI_HSS_PCS_SEG_NUM = 26,
- XFI_HSS_TX_SEG_NUM = 27,
- XFI_HSS_RX_SEG_NUM = 28,
- XFI_HSS_PLL_SEG_NUM = 29,
- MISC_NIC_INFO_SEG_NUM = 30,
- INTR_STATES_SEG_NUM = 31,
- CAM_ENTRIES_SEG_NUM = 32,
- ROUTING_WORDS_SEG_NUM = 33,
- ETS_SEG_NUM = 34,
- PROBE_DUMP_SEG_NUM = 35,
- ROUTING_INDEX_SEG_NUM = 36,
- MAC_PROTOCOL_SEG_NUM = 37,
- XAUI2_AN_SEG_NUM = 38,
- XAUI2_HSS_PCS_SEG_NUM = 39,
- XFI2_AN_SEG_NUM = 40,
- XFI2_TRAIN_SEG_NUM = 41,
- XFI2_HSS_PCS_SEG_NUM = 42,
- XFI2_HSS_TX_SEG_NUM = 43,
- XFI2_HSS_RX_SEG_NUM = 44,
- XFI2_HSS_PLL_SEG_NUM = 45,
- SEM_REGS_SEG_NUM = 50
-
-};
-
-/* There are 64 generic NIC registers. */
-#define NIC_REGS_DUMP_WORD_COUNT 64
-/* XGMAC word count. */
-#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
-/* Word counts for the SERDES blocks. */
-#define XG_SERDES_XAUI_AN_COUNT 14
-#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
-#define XG_SERDES_XFI_AN_COUNT 14
-#define XG_SERDES_XFI_TRAIN_COUNT 12
-#define XG_SERDES_XFI_HSS_PCS_COUNT 15
-#define XG_SERDES_XFI_HSS_TX_COUNT 32
-#define XG_SERDES_XFI_HSS_RX_COUNT 32
-#define XG_SERDES_XFI_HSS_PLL_COUNT 32
-
-/* There are 2 CNA ETS and 8 NIC ETS registers. */
-#define ETS_REGS_DUMP_WORD_COUNT 10
-
-/* Each probe mux entry stores the probe type plus 64 entries
- * that are each 64 bits in length. There are a total of
- * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
- */
-#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
-#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
- PRB_MX_ADDR_VALID_TOTAL)
-/* Each routing entry consists of 4 32-bit words.
- * They are route type, index, index word, and result.
- * There are 2 route blocks with 8 entries each and
- * 2 NIC blocks with 16 entries each.
- * The total is 48 entries with 4 words each.
- */
-#define RT_IDX_DUMP_ENTRIES 48
-#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
-#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
- RT_IDX_DUMP_WORDS_PER_ENTRY)
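
As a quick check of the sizing above, the probe and routing totals work out from the comments alone. The standalone snippet below assumes PRB_MX_ADDR_MAX_MUX is the 64 mux entries the probe comment mentions and PRB_MX_ADDR_VALID_TOTAL is 34; both are defined elsewhere in this header, so the literal numbers come from the comments rather than from the register definitions.

#include <assert.h>

int main(void)
{
	/* Probe dump: one type word plus a lo/hi pair for each of 64 muxes,
	 * repeated for the 34 valid probes. */
	int words_per_probe = 1 + (64 * 2);
	int probe_words = words_per_probe * 34;

	/* Routing dump: 2 route blocks of 8 entries plus 2 NIC blocks of
	 * 16 entries, 4 words per entry. */
	int rt_entries = (2 * 8) + (2 * 16);
	int rt_words = rt_entries * 4;

	assert(words_per_probe == 129);
	assert(probe_words == 4386);
	assert(rt_entries == 48);
	assert(rt_words == 192);
	return 0;
}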
-/* There are 10 address blocks in filter, each with
- * different entry counts and different word-count-per-entry.
- */
-#define MAC_ADDR_DUMP_ENTRIES \
- ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
- (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
- (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
- (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
-#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
-#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
- MAC_ADDR_DUMP_WORDS_PER_ENTRY)
-/* Maximum of 4 functions whose semaphore registers are
- * in the coredump.
- */
-#define MAX_SEMAPHORE_FUNCTIONS 4
-/* Defines for accessing the MPI shadow registers. */
-#define RISC_124 0x0003007c
-#define RISC_127 0x0003007f
-#define SHADOW_OFFSET 0xb0000000
-#define SHADOW_REG_SHIFT 20
-
-struct ql_nic_misc {
- u32 rx_ring_count;
- u32 tx_ring_count;
- u32 intr_count;
- u32 function;
-};
-
-struct ql_reg_dump {
-
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[64];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_CPUS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
-
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[8+2];
-};
-
-struct ql_mpi_coredump {
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 1 */
- struct mpi_coredump_segment_header core_regs_seg_hdr;
- u32 mpi_core_regs[MPI_CORE_REGS_CNT];
- u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
-
- /* segment 2 */
- struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
- u32 test_logic_regs[TEST_REGS_CNT];
-
- /* segment 3 */
- struct mpi_coredump_segment_header rmii_regs_seg_hdr;
- u32 rmii_regs[RMII_REGS_CNT];
-
- /* segment 4 */
- struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
- u32 fcmac1_regs[FCMAC_REGS_CNT];
-
- /* segment 5 */
- struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
- u32 fcmac2_regs[FCMAC_REGS_CNT];
-
- /* segment 6 */
- struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
- u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 7 */
- struct mpi_coredump_segment_header ide_regs_seg_hdr;
- u32 ide_regs[IDE_REGS_CNT];
-
- /* segment 8 */
- struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
- u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 9 */
- struct mpi_coredump_segment_header smbus_regs_seg_hdr;
- u32 smbus_regs[SMBUS_REGS_CNT];
-
- /* segment 10 */
- struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
- u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 11 */
- struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
- u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 12 */
- struct mpi_coredump_segment_header i2c_regs_seg_hdr;
- u32 i2c_regs[I2C_REGS_CNT];
- /* segment 13 */
- struct mpi_coredump_segment_header memc_regs_seg_hdr;
- u32 memc_regs[MEMC_REGS_CNT];
-
- /* segment 14 */
- struct mpi_coredump_segment_header pbus_regs_seg_hdr;
- u32 pbus_regs[PBUS_REGS_CNT];
-
- /* segment 15 */
- struct mpi_coredump_segment_header mde_regs_seg_hdr;
- u32 mde_regs[MDE_REGS_CNT];
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 17 */
- struct mpi_coredump_segment_header nic2_regs_seg_hdr;
- u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 18 */
- struct mpi_coredump_segment_header xgmac1_seg_hdr;
- u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 19 */
- struct mpi_coredump_segment_header xgmac2_seg_hdr;
- u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 20 */
- struct mpi_coredump_segment_header code_ram_seg_hdr;
- u32 code_ram[CODE_RAM_CNT];
-
- /* segment 21 */
- struct mpi_coredump_segment_header memc_ram_seg_hdr;
- u32 memc_ram[MEMC_RAM_CNT];
-
- /* segment 22 */
- struct mpi_coredump_segment_header xaui_an_hdr;
- u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 23 */
- struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
- u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 24 */
- struct mpi_coredump_segment_header xfi_an_hdr;
- u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 25 */
- struct mpi_coredump_segment_header xfi_train_hdr;
- u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 26 */
- struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
- u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 27 */
- struct mpi_coredump_segment_header xfi_hss_tx_hdr;
- u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 28 */
- struct mpi_coredump_segment_header xfi_hss_rx_hdr;
- u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 29 */
- struct mpi_coredump_segment_header xfi_hss_pll_hdr;
- u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_RX_RINGS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[ETS_REGS_DUMP_WORD_COUNT];
-
- /* segment 35 */
- struct mpi_coredump_segment_header probe_dump_seg_hdr;
- u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
-
- /* segment 36 */
- struct mpi_coredump_segment_header routing_reg_seg_hdr;
- u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
-
- /* segment 37 */
- struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
- u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
-
- /* segment 38 */
- struct mpi_coredump_segment_header xaui2_an_hdr;
- u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 39 */
- struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
- u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 40 */
- struct mpi_coredump_segment_header xfi2_an_hdr;
- u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 41 */
- struct mpi_coredump_segment_header xfi2_train_hdr;
- u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 42 */
- struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
- u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 43 */
- struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
- u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 44 */
- struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
- u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 45 */
- struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
- u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 50 */
- /* semaphore register for all 5 functions */
- struct mpi_coredump_segment_header sem_regs_seg_hdr;
- u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
-};
-
-/*
- * intr_context structure is used during initialization
- * to hook the interrupts. It is also used in a single
- * irq environment as a context to the ISR.
- */
-struct intr_context {
- struct ql_adapter *qdev;
- u32 intr;
- u32 irq_mask; /* Mask of which rings the vector services. */
- u32 hooked;
- u32 intr_en_mask; /* value/mask used to enable this intr */
- u32 intr_dis_mask; /* value/mask used to disable this intr */
- u32 intr_read_mask; /* value/mask used to read this intr */
- char name[IFNAMSIZ * 2];
- atomic_t irq_cnt; /* irq_cnt is used in single vector
- * environment. It's incremented for each
- * irq handler that is scheduled. When each
- * handler finishes it decrements irq_cnt and
- * enables interrupts if it's zero. */
- irq_handler_t handler;
-};
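
The irq_cnt comment above describes a simple reference count around outstanding handlers. Here is a minimal sketch of that convention, assuming a single-vector setup; example_isr_schedule() and example_handler_done() are hypothetical helpers written for illustration and are not the driver's real ISR path (which lives in qlge_main.c).

static inline void example_isr_schedule(struct intr_context *ctx,
					struct napi_struct *napi)
{
	/* One more handler is now outstanding for this vector. */
	atomic_inc(&ctx->irq_cnt);
	napi_schedule(napi);
}

static inline void example_handler_done(struct intr_context *ctx)
{
	/* Re-enable the interrupt only when the last handler finishes. */
	if (atomic_dec_and_test(&ctx->irq_cnt))
		ql_enable_completion_interrupt(ctx->qdev, ctx->intr);
}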
-
-/* adapter flags definitions. */
-enum {
- QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
- QL_LEGACY_ENABLED = 1,
- QL_MSI_ENABLED = 2,
- QL_MSIX_ENABLED = 3,
- QL_DMA64 = 4,
- QL_PROMISCUOUS = 5,
- QL_ALLMULTI = 6,
- QL_PORT_CFG = 7,
- QL_CAM_RT_SET = 8,
- QL_SELFTEST = 9,
- QL_LB_LINK_UP = 10,
- QL_FRC_COREDUMP = 11,
- QL_EEH_FATAL = 12,
- QL_ASIC_RECOVERY = 14, /* We are in ASIC recovery. */
-};
-
-/* link_status bit definitions */
-enum {
- STS_LOOPBACK_MASK = 0x00000700,
- STS_LOOPBACK_PCS = 0x00000100,
- STS_LOOPBACK_HSS = 0x00000200,
- STS_LOOPBACK_EXT = 0x00000300,
- STS_PAUSE_MASK = 0x000000c0,
- STS_PAUSE_STD = 0x00000040,
- STS_PAUSE_PRI = 0x00000080,
- STS_SPEED_MASK = 0x00000038,
- STS_SPEED_100Mb = 0x00000000,
- STS_SPEED_1Gb = 0x00000008,
- STS_SPEED_10Gb = 0x00000010,
- STS_LINK_TYPE_MASK = 0x00000007,
- STS_LINK_TYPE_XFI = 0x00000001,
- STS_LINK_TYPE_XAUI = 0x00000002,
- STS_LINK_TYPE_XFI_BP = 0x00000003,
- STS_LINK_TYPE_XAUI_BP = 0x00000004,
- STS_LINK_TYPE_10GBASET = 0x00000005,
-};
-
-/* link_config bit definitions */
-enum {
- CFG_JUMBO_FRAME_SIZE = 0x00010000,
- CFG_PAUSE_MASK = 0x00000060,
- CFG_PAUSE_STD = 0x00000020,
- CFG_PAUSE_PRI = 0x00000040,
- CFG_DCBX = 0x00000010,
- CFG_LOOPBACK_MASK = 0x00000007,
- CFG_LOOPBACK_PCS = 0x00000002,
- CFG_LOOPBACK_HSS = 0x00000004,
- CFG_LOOPBACK_EXT = 0x00000006,
- CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
-};
-
-struct nic_operations {
-
- int (*get_flash) (struct ql_adapter *);
- int (*port_initialize) (struct ql_adapter *);
-};
-
-/*
- * The main Adapter structure definition.
- * This structure has all fields relevant to the hardware.
- */
-struct ql_adapter {
- struct ricb ricb;
- unsigned long flags;
- u32 wol;
-
- struct nic_stats nic_stats;
-
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
- /* PCI Configuration information for this device */
- struct pci_dev *pdev;
- struct net_device *ndev; /* Parent NET device */
-
- /* Hardware information */
- u32 chip_rev_id;
- u32 fw_rev_id;
- u32 func; /* PCI function for this adapter */
- u32 alt_func; /* PCI function for alternate adapter */
- u32 port; /* Port number this adapter */
-
- spinlock_t adapter_lock;
- spinlock_t hw_lock;
- spinlock_t stats_lock;
-
- /* PCI Bus Relative Register Addresses */
- void __iomem *reg_base;
- void __iomem *doorbell_area;
- u32 doorbell_area_size;
-
- u32 msg_enable;
-
- /* Page for Shadow Registers */
- void *rx_ring_shadow_reg_area;
- dma_addr_t rx_ring_shadow_reg_dma;
- void *tx_ring_shadow_reg_area;
- dma_addr_t tx_ring_shadow_reg_dma;
-
- u32 mailbox_in;
- u32 mailbox_out;
- struct mbox_params idc_mbc;
- struct mutex mpi_mutex;
-
- int tx_ring_size;
- int rx_ring_size;
- u32 intr_count;
- struct msix_entry *msi_x_entry;
- struct intr_context intr_context[MAX_RX_RINGS];
-
- int tx_ring_count; /* One per online CPU. */
- u32 rss_ring_count; /* One per irq vector. */
- /*
- * rx_ring_count =
- * (CPU count * outbound completion rx_ring) +
- * (irq_vector_cnt * inbound (RSS) completion rx_ring)
- */
- int rx_ring_count;
- int ring_mem_size;
- void *ring_mem;
-
- struct rx_ring rx_ring[MAX_RX_RINGS];
- struct tx_ring tx_ring[MAX_TX_RINGS];
- unsigned int lbq_buf_order;
-
- int rx_csum;
- u32 default_rx_queue;
-
- u16 rx_coalesce_usecs; /* cqicb->int_delay */
- u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
- u16 tx_coalesce_usecs; /* cqicb->int_delay */
- u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
-
- u32 xg_sem_mask;
- u32 port_link_up;
- u32 port_init;
- u32 link_status;
- struct ql_mpi_coredump *mpi_coredump;
- u32 core_is_dumped;
- u32 link_config;
- u32 led_config;
- u32 max_frame_size;
-
- union flash_params flash;
-
- struct workqueue_struct *workqueue;
- struct delayed_work asic_reset_work;
- struct delayed_work mpi_reset_work;
- struct delayed_work mpi_work;
- struct delayed_work mpi_port_cfg_work;
- struct delayed_work mpi_idc_work;
- struct delayed_work mpi_core_to_log;
- struct completion ide_completion;
- const struct nic_operations *nic_ops;
- u16 device_id;
- struct timer_list timer;
- atomic_t lb_count;
- /* Keep local copy of current mac address. */
- char current_mac_addr[ETH_ALEN];
-};
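
To make the ring-count comment inside struct ql_adapter concrete, here is a worked example assuming a hypothetical system with 8 online CPUs and 8 MSI-X vectors; the figures are illustrative only and are not taken from the driver.

#include <assert.h>

int main(void)
{
	int cpus = 8;			/* online CPUs (hypothetical) */
	int vectors = 8;		/* MSI-X vectors (hypothetical) */

	int tx_ring_count = cpus;	/* one TX ring per CPU */
	int rss_ring_count = vectors;	/* one RSS completion ring per vector */
	int rx_ring_count = tx_ring_count + rss_ring_count;

	assert(rx_ring_count == 16);
	return 0;
}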
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
-{
- return readl(qdev->reg_base + reg);
-}
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
-{
- writel(val, qdev->reg_base + reg);
-}
-
-/*
- * Doorbell Registers:
- * Doorbell registers are virtual registers in the PCI memory space.
- * The space is allocated by the chip during PCI initialization. The
- * device driver finds the doorbell address in BAR 3 in PCI config space.
- * The registers are used to control outbound and inbound queues. For
- * example, the producer index for an outbound queue. Each queue uses
- * 1 4k chunk of memory. The lower half of the space is for outbound
- * queues. The upper half is for inbound queues.
- */
-static inline void ql_write_db_reg(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-/*
- * Doorbell Registers:
- * Doorbell registers are virtual registers in the PCI memory space.
- * The space is allocated by the chip during PCI initialization. The
- * device driver finds the doorbell address in BAR 3 in PCI config space.
- * The registers are used to control outbound and inbound queues. For
- * example, the producer index for an outbound queue. Each queue uses
- * 1 4k chunk of memory. The lower half of the space is for outbound
- * queues. The upper half is for inbound queues.
- * Caller has to guarantee ordering.
- */
-static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
-{
- writel_relaxed(val, addr);
-}
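
Because the relaxed variant leaves ordering to the caller, a hedged illustration of publishing a producer index might look like the helper below; example_ring_tx_doorbell() is not taken from qlge_main.c and only shows one way the barrier and the relaxed write could be paired.

static inline void example_ring_tx_doorbell(struct tx_ring *tx_ring)
{
	/* Make the new queue entries visible before the index update. */
	wmb();
	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}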
-
-/*
- * Shadow Registers:
- * Outbound queues have a consumer index that is maintained by the chip.
- * Inbound queues have a producer index that is maintained by the chip.
- * For lower overhead, these registers are "shadowed" to host memory
- * which allows the device driver to track the queue progress without
- * PCI reads. When an entry is placed on an inbound queue, the chip will
- * update the relevant index register and then copy the value to the
- * shadow register in host memory.
- */
-static inline u32 ql_read_sh_reg(__le32 *addr)
-{
- u32 reg;
- reg = le32_to_cpu(*addr);
- rmb();
- return reg;
-}
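
With the shadow copy in host memory, the completion path can see how far the chip's producer index has advanced without a PCI read. A minimal sketch, using the field names from struct rx_ring earlier in this header; example_rx_entries_pending() is illustrative and not part of the driver.

static inline u32 example_rx_entries_pending(struct rx_ring *rx_ring)
{
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

	/* Both indices wrap at cq_len, so take the difference modulo that. */
	if (prod >= rx_ring->cnsmr_idx)
		return prod - rx_ring->cnsmr_idx;
	return rx_ring->cq_len - rx_ring->cnsmr_idx + prod;
}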
-
-extern char qlge_driver_name[];
-extern const char qlge_driver_version[];
-extern const struct ethtool_ops qlge_ethtool_ops;
-
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
-void ql_queue_fw_error(struct ql_adapter *qdev);
-void ql_mpi_work(struct work_struct *work);
-void ql_mpi_reset_work(struct work_struct *work);
-void ql_mpi_core_to_log(struct work_struct *work);
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
-void ql_queue_asic_error(struct ql_adapter *qdev);
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
-void ql_set_ethtool_ops(struct net_device *ndev);
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
-void ql_mpi_idc_work(struct work_struct *work);
-void ql_mpi_port_cfg_work(struct work_struct *work);
-int ql_mb_get_fw_state(struct ql_adapter *qdev);
-int ql_cam_route_initialize(struct ql_adapter *qdev);
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
-int ql_unpause_mpi_risc(struct ql_adapter *qdev);
-int ql_pause_mpi_risc(struct ql_adapter *qdev);
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
- int word_count);
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
-int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
-int ql_mb_get_led_cfg(struct ql_adapter *qdev);
-void ql_link_on(struct ql_adapter *qdev);
-void ql_link_off(struct ql_adapter *qdev);
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
-int ql_mb_get_port_cfg(struct ql_adapter *qdev);
-int ql_mb_set_port_cfg(struct ql_adapter *qdev);
-int ql_wait_fifo_empty(struct ql_adapter *qdev);
-void ql_get_dump(struct ql_adapter *qdev, void *buff);
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
-void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
-int ql_own_firmware(struct ql_adapter *qdev);
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-
-/* #define QL_ALL_DUMP */
-/* #define QL_REG_DUMP */
-/* #define QL_DEV_DUMP */
-/* #define QL_CB_DUMP */
-/* #define QL_IB_DUMP */
-/* #define QL_OB_DUMP */
-
-#ifdef QL_REG_DUMP
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-void ql_dump_routing_entries(struct ql_adapter *qdev);
-void ql_dump_regs(struct ql_adapter *qdev);
-#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
-#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
-#else
-#define QL_DUMP_REGS(qdev)
-#define QL_DUMP_ROUTE(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
-#endif
-
-#ifdef QL_STAT_DUMP
-void ql_dump_stat(struct ql_adapter *qdev);
-#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
-#else
-#define QL_DUMP_STAT(qdev)
-#endif
-
-#ifdef QL_DEV_DUMP
-void ql_dump_qdev(struct ql_adapter *qdev);
-#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
-#else
-#define QL_DUMP_QDEV(qdev)
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb);
-void ql_dump_tx_ring(struct tx_ring *tx_ring);
-void ql_dump_ricb(struct ricb *ricb);
-void ql_dump_cqicb(struct cqicb *cqicb);
-void ql_dump_rx_ring(struct rx_ring *rx_ring);
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
-#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
-#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
-#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
-#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
-#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
- ql_dump_hw_cb(qdev, size, bit, q_id)
-#else
-#define QL_DUMP_RICB(ricb)
-#define QL_DUMP_WQICB(wqicb)
-#define QL_DUMP_TX_RING(tx_ring)
-#define QL_DUMP_CQICB(cqicb)
-#define QL_DUMP_RX_RING(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
-#else
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
-#else
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev);
-#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
-#else
-#define QL_DUMP_ALL(qdev)
-#endif
-
-#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
deleted file mode 100644
index 31389ab8bdf7..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ /dev/null
@@ -1,2024 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-
-#include "qlge.h"
-
-/* Read a NIC register from the alternate function. */
-static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
-{
- u32 register_to_read;
- u32 reg_val;
- unsigned int status = 0;
-
- register_to_read = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
- status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
- if (status != 0)
- return 0xffffffff;
-
- return reg_val;
-}
-
-/* Write a NIC register from the alternate function. */
-static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
-{
- u32 register_to_read;
- int status = 0;
-
- register_to_read = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
- status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
-
- return status;
-}
-
-static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
-{
- u32 temp;
- int count = 10;
-
- while (count) {
- temp = ql_read_other_func_reg(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit)
- return -1;
- else if (temp & bit)
- return 0;
- mdelay(10);
- count--;
- }
- return -1;
-}
-
-static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
-exit:
- return status;
-}
-
-/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read32(qdev, XG_SERDES_DATA);
-exit:
- return status;
-}
-
-static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- unsigned int direct_valid, unsigned int indirect_valid)
-{
- unsigned int status;
-
- status = 1;
- if (direct_valid)
- status = ql_read_serdes_reg(qdev, addr, direct_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *direct_ptr = 0xDEADBEEF;
-
- status = 1;
- if (indirect_valid)
- status = ql_read_other_func_serdes_reg(
- qdev, addr, indirect_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *indirect_ptr = 0xDEADBEEF;
-}
-
-static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
-{
- int status;
- unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
- unsigned int xaui_indirect_valid, i;
- u32 *direct_ptr, temp;
- u32 *indirect_ptr;
-
- xfi_direct_valid = xfi_indirect_valid = 0;
- xaui_direct_valid = xaui_indirect_valid = 1;
-
- /* The XAUI needs to be read out per port */
- status = ql_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START, &temp);
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = 0;
-
- status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
-
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = 0;
-
- /*
- * XFI register is shared so we only need to read one
- * function and then check the bits.
- */
- status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
- if (status)
- temp = 0;
-
- if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
- XG_SERDES_ADDR_XFI1_PWR_UP) {
- /* now see if i'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = 1;
- else
- xfi_direct_valid = 1;
- }
- if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
- XG_SERDES_ADDR_XFI2_PWR_UP) {
- /* now see if i'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = 1;
- else
- xfi_indirect_valid = 1;
- }
-
- /* Get XAUI_AN register block. */
- if (qdev->func & 1) {
- /* Function 2 is direct */
- direct_ptr = mpi_coredump->serdes2_xaui_an;
- indirect_ptr = mpi_coredump->serdes_xaui_an;
- } else {
- /* Function 1 is direct */
- direct_ptr = mpi_coredump->serdes_xaui_an;
- indirect_ptr = mpi_coredump->serdes2_xaui_an;
- }
-
- for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- }
-
- for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_XFI_AN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_an;
- indirect_ptr = mpi_coredump->serdes_xfi_an;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_an;
- indirect_ptr = mpi_coredump->serdes2_xfi_an;
- }
-
- for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_TRAIN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes_xfi_train;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_train;
- }
-
- for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- }
-
- for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_TX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_tx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- }
- for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_RX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_rx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- }
-
- for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
-
- /* Get XAUI_XFI_HSS_PLL register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- }
- for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
- return 0;
-}
-
-static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status = 0;
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
-exit:
- return status;
-}
-
-/* Read the 400 xgmac control/statistics registers
- * skipping unused locations.
- */
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
-{
- int status = 0;
- int i;
-
- for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
- /* We're reading 400 xgmac registers, but we filter out
- * several locations that are non-responsive to reads.
- */
- if ((i == 0x00000114) ||
- (i == 0x00000118) ||
- (i == 0x0000013c) ||
- (i == 0x00000140) ||
- (i > 0x00000150 && i < 0x000001fc) ||
- (i > 0x00000278 && i < 0x000002a0) ||
- (i > 0x000002c0 && i < 0x000002cf) ||
- (i > 0x000002dc && i < 0x000002f0) ||
- (i > 0x000003c8 && i < 0x00000400) ||
- (i > 0x00000400 && i < 0x00000410) ||
- (i > 0x00000410 && i < 0x00000420) ||
- (i > 0x00000420 && i < 0x00000430) ||
- (i > 0x00000430 && i < 0x00000440) ||
- (i > 0x00000440 && i < 0x00000450) ||
- (i > 0x00000450 && i < 0x00000500) ||
- (i > 0x0000054c && i < 0x00000568) ||
- (i > 0x000005c8 && i < 0x00000600)) {
- if (other_function)
- status =
- ql_read_other_func_xgmac_reg(qdev, i, buf);
- else
- status = ql_read_xgmac_reg(qdev, i, buf);
-
- if (status)
- *buf = 0xdeadbeef;
- break;
- }
- }
- return status;
-}
-
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
-{
- int status = 0;
- int i;
-
- for (i = 0; i < 8; i++, buf++) {
- ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, NIC_ETS);
- }
-
- for (i = 0; i < 2; i++, buf++) {
- ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, CNA_ETS);
- }
-
- return status;
-}
-
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
-{
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
- ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
- *buf = ql_read32(qdev, INTR_EN);
- }
-}
-
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
-{
- int i, status;
- u32 value[3];
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower MAC address */
- *buf++ = value[1]; /* upper MAC address */
- *buf++ = value[2]; /* output */
- }
- for (i = 0; i < 32; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower Mcast address */
- *buf++ = value[1]; /* upper Mcast address */
- }
-err:
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
-{
- int status;
- u32 value, i;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = ql_get_routing_reg(qdev, i, &value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of routing index register\n");
- goto err;
- } else {
- *buf++ = value;
- }
- }
-err:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
-{
- u32 i;
- int status;
-
- for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = ql_write_mpi_reg(qdev, RISC_124,
- (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
- if (status)
- goto end;
- status = ql_read_mpi_reg(qdev, RISC_127, buf);
- if (status)
- goto end;
- }
-end:
- return status;
-}
-
-/* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
-{
- int i, status = 0;
- for (i = 0; i < count; i++, buf++) {
- status = ql_read_mpi_reg(qdev, offset + i, buf);
- if (status)
- return status;
- }
- return status;
-}
-
-/* Read the ASIC probe dump */
-static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
-{
- u32 module, mux_sel, probe, lo_val, hi_val;
-
- for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
- if (!((valid >> module) & 1))
- continue;
- for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
- probe = clock
- | PRB_MX_ADDR_ARE
- | mux_sel
- | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
- ql_write32(qdev, PRB_MX_ADDR, probe);
- lo_val = ql_read32(qdev, PRB_MX_DATA);
- if (mux_sel == 0) {
- *buf = probe;
- buf++;
- }
- probe |= PRB_MX_ADDR_UP;
- ql_write32(qdev, PRB_MX_ADDR, probe);
- hi_val = ql_read32(qdev, PRB_MX_DATA);
- *buf = lo_val;
- buf++;
- *buf = hi_val;
- buf++;
- }
- }
- return buf;
-}
-
-static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
-{
- /* First we have to enable the probe mux */
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
- return 0;
-
-}
-
-/* Read out the routing index registers */
-static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
-{
- int status;
- u32 type, index, index_max;
- u32 result_index;
- u32 result_data;
- u32 val;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (type = 0; type < 4; type++) {
- if (type < 2)
- index_max = 8;
- else
- index_max = 16;
- for (index = 0; index < index_max; index++) {
- val = RT_IDX_RS
- | (type << RT_IDX_TYPE_SHIFT)
- | (index << RT_IDX_IDX_SHIFT);
- ql_write32(qdev, RT_IDX, val);
- result_index = 0;
- while ((result_index & RT_IDX_MR) == 0)
- result_index = ql_read32(qdev, RT_IDX);
- result_data = ql_read32(qdev, RT_DATA);
- *buf = type;
- buf++;
- *buf = index;
- buf++;
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read out the MAC protocol registers */
-static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
-{
- u32 result_index, result_data;
- u32 type;
- u32 index;
- u32 offset;
- u32 val;
- u32 initial_val = MAC_ADDR_RS;
- u32 max_index;
- u32 max_offset;
-
- for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
- switch (type) {
-
- case 0: /* CAM */
- initial_val |= MAC_ADDR_ADR;
- max_index = MAC_ADDR_MAX_CAM_ENTRIES;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 1: /* Multicast MAC Address */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 2: /* VLAN filter mask */
- case 3: /* MC filter mask */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 4: /* FC MAC addresses */
- max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
- break;
- case 5: /* Mgmt MAC addresses */
- max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
- break;
- case 6: /* Mgmt VLAN addresses */
- max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
- break;
- case 7: /* Mgmt IPv4 address */
- max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
- break;
- case 8: /* Mgmt IPv6 address */
- max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
- break;
- case 9: /* Mgmt TCP/UDP Dest port */
- max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
- break;
- default:
- pr_err("Bad type!!! 0x%08x\n", type);
- max_index = 0;
- max_offset = 0;
- break;
- }
- for (index = 0; index < max_index; index++) {
- for (offset = 0; offset < max_offset; offset++) {
- val = initial_val
- | (type << MAC_ADDR_TYPE_SHIFT)
- | (index << MAC_ADDR_IDX_SHIFT)
- | (offset);
- ql_write32(qdev, MAC_ADDR_IDX, val);
- result_index = 0;
- while ((result_index & MAC_ADDR_MR) == 0) {
- result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
- }
- result_data = ql_read32(qdev, MAC_ADDR_DATA);
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- }
-}
-
-static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
-{
- u32 func_num, reg, reg_val;
- int status;
-
- for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
- reg = MPI_NIC_REG_BLOCK
- | (func_num << MPI_NIC_FUNCTION_SHIFT)
- | (SEM / 4);
- status = ql_read_mpi_reg(qdev, reg, &reg_val);
- *buf = reg_val;
- /* if the read failed then dead fill the element. */
- if (!status)
- *buf = 0xdeadbeef;
- buf++;
- }
-}
-
-/* Create a coredump segment header */
-static void ql_build_coredump_seg_header(
- struct mpi_coredump_segment_header *seg_hdr,
- u32 seg_number, u32 seg_size, u8 *desc)
-{
- memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
- seg_hdr->cookie = MPI_COREDUMP_COOKIE;
- seg_hdr->segNum = seg_number;
- seg_hdr->segSize = seg_size;
- strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
-}
-
-/*
- * This function should be called when a coredump / probedump
- * is to be extracted from the HBA. It is assumed there is a
- * qdev structure that contains the base address of the register
- * space for this function as well as a coredump structure that
- * will contain the dump.
- */
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
-{
- int status;
- int i;
-
- if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
- return -EINVAL;
- }
-
- /* Try to get the spinlock, but don't worry if
- * it isn't available. If the firmware died it
- * might be holding the sem.
- */
- ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
-
- status = ql_pause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC pause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Insert the global header */
- memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
- sizeof(struct ql_mpi_coredump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
- /* Get generic NIC reg dump */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
-
- /* Get XGMac registers. (Segment 18, Rev C. step 21) */
- ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
-
- if (qdev->func & 1) {
- /* Odd means our function is NIC 2 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
-
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
- } else {
- /* Even means our function is NIC 1 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
- }
-
- /* Rev C. Step 20a */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_an),
- "XAUI AN Registers");
-
- /* Rev C. Step 20b */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_hss_pcs),
- "XAUI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_an),
- "XFI AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_train),
- "XFI TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pcs),
- "XFI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_tx),
- "XFI HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_rx),
- "XFI HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pll),
- "XFI HSS PLL Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_an),
- "XAUI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
- "XAUI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_an),
- "XFI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_train),
- "XFI2 TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
- "XFI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_tx),
- "XFI2 HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_rx),
- "XFI2 HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pll),
- "XFI2 HSS PLL Registers");
-
- status = ql_get_serdes_regs(qdev, mpi_coredump);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
- sizeof(mpi_coredump->core_regs_seg_hdr) +
- sizeof(mpi_coredump->mpi_core_regs) +
- sizeof(mpi_coredump->mpi_core_sh_regs),
- "Core Registers");
-
- /* Get the MPI Core Registers */
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
- MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
- if (status)
- goto err;
- /* Get the 16 MPI shadow registers */
- status = ql_get_mpi_shadow_regs(qdev,
- &mpi_coredump->mpi_core_sh_regs[0]);
- if (status)
- goto err;
-
- /* Get the Test Logic Registers */
- ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->test_logic_regs),
- "Test Logic Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
- TEST_REGS_ADDR, TEST_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the RMII Registers */
- ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->rmii_regs),
- "RMII Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
- RMII_REGS_ADDR, RMII_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC1 Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac1_regs),
- "FCMAC1 Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
- FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC2 Registers */
-
- ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac2_regs),
- "FCMAC2 Registers");
-
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
- FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc1_mbx_regs),
- "FC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
- FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the IDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ide_regs),
- "IDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
- IDE_REGS_ADDR, IDE_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic1_mbx_regs),
- "NIC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
- NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the SMBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->smbus_regs),
- "SMBus Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
- SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc2_mbx_regs),
- "FC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
- FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic2_mbx_regs),
- "NIC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
- NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the I2C Registers */
- ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->i2c_regs),
- "I2C Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
- I2C_REGS_ADDR, I2C_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MEMC Registers */
- ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_regs),
- "MEMC Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
- MEMC_REGS_ADDR, MEMC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the PBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->pbus_regs),
- "PBUS Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
- PBUS_REGS_ADDR, PBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mde_regs),
- "MDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
- MDE_REGS_ADDR, MDE_REGS_CNT);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 31 */
- /* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- goto err;
-
- /* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->probe_dump),
- "Probe Dump");
- ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->routing_regs),
- "Routing Regs");
- status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mac_prot_regs),
- "MAC Prot Regs");
- ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
-
- /* Get the semaphore registers for all 5 functions */
- ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->sem_regs), "Sem Registers");
-
- ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
-
-	/* Prevent the MPI from restarting while we dump the memory. */
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
-
- /* clear the pause */
- status = ql_unpause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC unpause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Reset the RISC so we can dump RAM */
- status = ql_hard_reset_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC reset. Status = 0x%.08x\n", status);
- goto err;
- }
-
- ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->code_ram),
- "WCS RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of CODE RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- /* Insert the segment header */
- ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_ram),
- "MEMC RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-err:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-
-}
-
-static void ql_get_core_dump(struct ql_adapter *qdev)
-{
- if (!ql_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- if (!netif_running(qdev->ndev)) {
- netif_err(qdev, ifup, qdev->ndev,
-			  "Force Coredump can only be done from an interface that is up\n");
- return;
- }
- ql_queue_fw_error(qdev);
-}
-
-static void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
-{
- int i, status;
-
-
- memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
- sizeof(struct ql_reg_dump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
-
- /* segment 16 */
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 16, Rev C. Step 18 */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_regs),
- "NIC Registers");
- /* Get generic reg dump */
- for (i = 0; i < 64; i++)
- mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
-
- /* Segment 31 */
- /* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- return;
-
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- return;
-
- /* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- return;
-}
-
-void ql_get_dump(struct ql_adapter *qdev, void *buff)
-{
-	/*
-	 * If force coredump is set, the dump has already been taken and
-	 * is stored in our internal buffer, so just start the spool to
-	 * dump it to the log file and copy a snapshot of the general
-	 * regs to the user's buffer.  Otherwise take a complete dump
-	 * directly to the user's buffer.
-	 */
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
- if (!ql_core_dump(qdev, buff))
- ql_soft_reset_mpi_risc(qdev);
- else
- netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
- } else {
- ql_gen_reg_dump(qdev, buff);
- ql_get_core_dump(qdev);
- }
-}
-
-/* Coredump to messages log file using separate worker thread */
-void ql_mpi_core_to_log(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_core_to_log.work);
- u32 *tmp, count;
- int i;
-
- count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
- tmp = (u32 *)qdev->mpi_coredump;
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Core is dumping to log file!\n");
-
- for (i = 0; i < count; i += 8) {
- pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x\n", i,
- tmp[i + 0],
- tmp[i + 1],
- tmp[i + 2],
- tmp[i + 3],
- tmp[i + 4],
- tmp[i + 5],
- tmp[i + 6],
- tmp[i + 7]);
- msleep(5);
- }
-}
-
-#ifdef QL_REG_DUMP
-static void ql_dump_intr_states(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
- for (i = 0; i < qdev->intr_count; i++) {
- ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
- value = ql_read32(qdev, INTR_EN);
- pr_err("%s: Interrupt %d is %s\n",
- qdev->ndev->name, i,
- (value & INTR_EN_EN ? "enabled" : "disabled"));
- }
-}
-
-#define DUMP_XGMAC(qdev, reg) \
-do { \
- u32 data; \
- ql_read_xgmac_reg(qdev, reg, &data); \
- pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
-} while (0)
-
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
-{
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- pr_err("%s: Couldn't get xgmac sem\n", __func__);
- return;
- }
- DUMP_XGMAC(qdev, PAUSE_SRC_LO);
- DUMP_XGMAC(qdev, PAUSE_SRC_HI);
- DUMP_XGMAC(qdev, GLOBAL_CFG);
- DUMP_XGMAC(qdev, TX_CFG);
- DUMP_XGMAC(qdev, RX_CFG);
- DUMP_XGMAC(qdev, FLOW_CTL);
- DUMP_XGMAC(qdev, PAUSE_OPCODE);
- DUMP_XGMAC(qdev, PAUSE_TIMER);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
- DUMP_XGMAC(qdev, MAC_TX_PARAMS);
- DUMP_XGMAC(qdev, MAC_RX_PARAMS);
- DUMP_XGMAC(qdev, MAC_SYS_INT);
- DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
- DUMP_XGMAC(qdev, MAC_MGMT_INT);
- DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
- DUMP_XGMAC(qdev, EXT_ARB_MODE);
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
-}
-
-static void ql_dump_ets_regs(struct ql_adapter *qdev)
-{
-}
-
-static void ql_dump_cam_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value[3];
-
- i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (i)
- return;
- for (i = 0; i < 4; i++) {
- if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
- qdev->ndev->name, i, value[1], value[0],
- value[2]);
- }
- }
- for (i = 0; i < 32; i++) {
- if (ql_get_mac_addr_reg
- (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
- qdev->ndev->name, i, value[1], value[0]);
- }
- }
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-void ql_dump_routing_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
- i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (i)
- return;
- for (i = 0; i < 16; i++) {
- value = 0;
- if (ql_get_routing_reg(qdev, i, &value)) {
- pr_err("%s: Failed read of routing index register\n",
- __func__);
- return;
- } else {
- if (value)
- pr_err("%s: Routing Mask %d = 0x%.08x\n",
- qdev->ndev->name, i, value);
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-#define DUMP_REG(qdev, reg) \
- pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
-
-void ql_dump_regs(struct ql_adapter *qdev)
-{
- pr_err("reg dump for function #%d\n", qdev->func);
- DUMP_REG(qdev, SYS);
- DUMP_REG(qdev, RST_FO);
- DUMP_REG(qdev, FSC);
- DUMP_REG(qdev, CSR);
- DUMP_REG(qdev, ICB_RID);
- DUMP_REG(qdev, ICB_L);
- DUMP_REG(qdev, ICB_H);
- DUMP_REG(qdev, CFG);
- DUMP_REG(qdev, BIOS_ADDR);
- DUMP_REG(qdev, STS);
- DUMP_REG(qdev, INTR_EN);
- DUMP_REG(qdev, INTR_MASK);
- DUMP_REG(qdev, ISR1);
- DUMP_REG(qdev, ISR2);
- DUMP_REG(qdev, ISR3);
- DUMP_REG(qdev, ISR4);
- DUMP_REG(qdev, REV_ID);
- DUMP_REG(qdev, FRC_ECC_ERR);
- DUMP_REG(qdev, ERR_STS);
- DUMP_REG(qdev, RAM_DBG_ADDR);
- DUMP_REG(qdev, RAM_DBG_DATA);
- DUMP_REG(qdev, ECC_ERR_CNT);
- DUMP_REG(qdev, SEM);
- DUMP_REG(qdev, GPIO_1);
- DUMP_REG(qdev, GPIO_2);
- DUMP_REG(qdev, GPIO_3);
- DUMP_REG(qdev, XGMAC_ADDR);
- DUMP_REG(qdev, XGMAC_DATA);
- DUMP_REG(qdev, NIC_ETS);
- DUMP_REG(qdev, CNA_ETS);
- DUMP_REG(qdev, FLASH_ADDR);
- DUMP_REG(qdev, FLASH_DATA);
- DUMP_REG(qdev, CQ_STOP);
- DUMP_REG(qdev, PAGE_TBL_RID);
- DUMP_REG(qdev, WQ_PAGE_TBL_LO);
- DUMP_REG(qdev, WQ_PAGE_TBL_HI);
- DUMP_REG(qdev, CQ_PAGE_TBL_LO);
- DUMP_REG(qdev, CQ_PAGE_TBL_HI);
- DUMP_REG(qdev, COS_DFLT_CQ1);
- DUMP_REG(qdev, COS_DFLT_CQ2);
- DUMP_REG(qdev, SPLT_HDR);
- DUMP_REG(qdev, FC_PAUSE_THRES);
- DUMP_REG(qdev, NIC_PAUSE_THRES);
- DUMP_REG(qdev, FC_ETHERTYPE);
- DUMP_REG(qdev, FC_RCV_CFG);
- DUMP_REG(qdev, NIC_RCV_CFG);
- DUMP_REG(qdev, FC_COS_TAGS);
- DUMP_REG(qdev, NIC_COS_TAGS);
- DUMP_REG(qdev, MGMT_RCV_CFG);
- DUMP_REG(qdev, XG_SERDES_ADDR);
- DUMP_REG(qdev, XG_SERDES_DATA);
- DUMP_REG(qdev, PRB_MX_ADDR);
- DUMP_REG(qdev, PRB_MX_DATA);
- ql_dump_intr_states(qdev);
- ql_dump_xgmac_control_regs(qdev);
- ql_dump_ets_regs(qdev);
- ql_dump_cam_entries(qdev);
- ql_dump_routing_entries(qdev);
-}
-#endif
-
-#ifdef QL_STAT_DUMP
-
-#define DUMP_STAT(qdev, stat) \
- pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
-
-void ql_dump_stat(struct ql_adapter *qdev)
-{
- pr_err("%s: Enter\n", __func__);
- DUMP_STAT(qdev, tx_pkts);
- DUMP_STAT(qdev, tx_bytes);
- DUMP_STAT(qdev, tx_mcast_pkts);
- DUMP_STAT(qdev, tx_bcast_pkts);
- DUMP_STAT(qdev, tx_ucast_pkts);
- DUMP_STAT(qdev, tx_ctl_pkts);
- DUMP_STAT(qdev, tx_pause_pkts);
- DUMP_STAT(qdev, tx_64_pkt);
- DUMP_STAT(qdev, tx_65_to_127_pkt);
- DUMP_STAT(qdev, tx_128_to_255_pkt);
- DUMP_STAT(qdev, tx_256_511_pkt);
- DUMP_STAT(qdev, tx_512_to_1023_pkt);
- DUMP_STAT(qdev, tx_1024_to_1518_pkt);
- DUMP_STAT(qdev, tx_1519_to_max_pkt);
- DUMP_STAT(qdev, tx_undersize_pkt);
- DUMP_STAT(qdev, tx_oversize_pkt);
- DUMP_STAT(qdev, rx_bytes);
- DUMP_STAT(qdev, rx_bytes_ok);
- DUMP_STAT(qdev, rx_pkts);
- DUMP_STAT(qdev, rx_pkts_ok);
- DUMP_STAT(qdev, rx_bcast_pkts);
- DUMP_STAT(qdev, rx_mcast_pkts);
- DUMP_STAT(qdev, rx_ucast_pkts);
- DUMP_STAT(qdev, rx_undersize_pkts);
- DUMP_STAT(qdev, rx_oversize_pkts);
- DUMP_STAT(qdev, rx_jabber_pkts);
- DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
- DUMP_STAT(qdev, rx_drop_events);
- DUMP_STAT(qdev, rx_fcerr_pkts);
- DUMP_STAT(qdev, rx_align_err);
- DUMP_STAT(qdev, rx_symbol_err);
- DUMP_STAT(qdev, rx_mac_err);
- DUMP_STAT(qdev, rx_ctl_pkts);
- DUMP_STAT(qdev, rx_pause_pkts);
- DUMP_STAT(qdev, rx_64_pkts);
- DUMP_STAT(qdev, rx_65_to_127_pkts);
- DUMP_STAT(qdev, rx_128_255_pkts);
- DUMP_STAT(qdev, rx_256_511_pkts);
- DUMP_STAT(qdev, rx_512_to_1023_pkts);
- DUMP_STAT(qdev, rx_1024_to_1518_pkts);
- DUMP_STAT(qdev, rx_1519_to_max_pkts);
- DUMP_STAT(qdev, rx_len_err_pkts);
-}
-#endif
-
-#ifdef QL_DEV_DUMP
-
-#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
-#define DUMP_QDEV_DMA_FIELD(qdev, field) \
- pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
-#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
- pr_err("%s[%d].%s = " type "\n", \
- #array, index, #field, qdev->array[index].field);
-void ql_dump_qdev(struct ql_adapter *qdev)
-{
- int i;
- DUMP_QDEV_FIELD(qdev, "%lx", flags);
- DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
- DUMP_QDEV_FIELD(qdev, "%p", pdev);
- DUMP_QDEV_FIELD(qdev, "%p", ndev);
- DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
- DUMP_QDEV_FIELD(qdev, "%p", reg_base);
- DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
- DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
- DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- if (qdev->msi_x_entry)
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
- }
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
- }
- DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
- DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
- DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
-}
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb)
-{
- pr_err("Dumping wqicb stuff...\n");
- pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
- pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
- pr_err("wqicb->cq_id_rss = %d\n",
- le16_to_cpu(wqicb->cq_id_rss));
- pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
- pr_err("wqicb->wq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->addr));
- pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
-}
-
-void ql_dump_tx_ring(struct tx_ring *tx_ring)
-{
- if (tx_ring == NULL)
- return;
- pr_err("===================== Dumping tx_ring %d ===============\n",
- tx_ring->wq_id);
- pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
- pr_err("tx_ring->base_dma = 0x%llx\n",
- (unsigned long long) tx_ring->wq_base_dma);
- pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
- tx_ring->cnsmr_idx_sh_reg,
- tx_ring->cnsmr_idx_sh_reg
- ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
- pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
- pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
- pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
- pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
- pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
- pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
- pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
- pr_err("tx_ring->q = %p\n", tx_ring->q);
- pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
-}
-
-void ql_dump_ricb(struct ricb *ricb)
-{
- int i;
- pr_err("===================== Dumping ricb ===============\n");
- pr_err("Dumping ricb stuff...\n");
-
- pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
- pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
- ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
- ricb->flags & RSS_L6K ? "RSS_L6K " : "",
- ricb->flags & RSS_LI ? "RSS_LI " : "",
- ricb->flags & RSS_LB ? "RSS_LB " : "",
- ricb->flags & RSS_LM ? "RSS_LM " : "",
- ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
- ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
- ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
- ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
- pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
- for (i = 0; i < 16; i++)
- pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->hash_cq_id[i]));
- for (i = 0; i < 10; i++)
- pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv6_hash_key[i]));
- for (i = 0; i < 4; i++)
- pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv4_hash_key[i]));
-}
-
-void ql_dump_cqicb(struct cqicb *cqicb)
-{
- pr_err("Dumping cqicb stuff...\n");
-
- pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
- pr_err("cqicb->flags = %x\n", cqicb->flags);
- pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
- pr_err("cqicb->addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->addr));
- pr_err("cqicb->prod_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
- pr_err("cqicb->pkt_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->pkt_delay));
- pr_err("cqicb->irq_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->irq_delay));
- pr_err("cqicb->lbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
- pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_buf_size));
- pr_err("cqicb->lbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_len));
- pr_err("cqicb->sbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
- pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_buf_size));
- pr_err("cqicb->sbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_len));
-}
-
-void ql_dump_rx_ring(struct rx_ring *rx_ring)
-{
- if (rx_ring == NULL)
- return;
- pr_err("===================== Dumping rx_ring %d ===============\n",
- rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s%s%s\n",
- rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
- rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
- rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
- pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
- pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
- pr_err("rx_ring->cq_base_dma = %llx\n",
- (unsigned long long) rx_ring->cq_base_dma);
- pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
- pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
- pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
- rx_ring->prod_idx_sh_reg,
- rx_ring->prod_idx_sh_reg
- ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
- pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
- (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
- pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
- rx_ring->cnsmr_idx_db_reg);
- pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
- pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
- pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
-
- pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
- pr_err("rx_ring->lbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_dma);
- pr_err("rx_ring->lbq_base_indirect = %p\n",
- rx_ring->lbq_base_indirect);
- pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
- pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
- pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
- pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
- rx_ring->lbq_prod_idx_db_reg);
- pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
- pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
- pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
- pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
- pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
-
- pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
- pr_err("rx_ring->sbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_dma);
- pr_err("rx_ring->sbq_base_indirect = %p\n",
- rx_ring->sbq_base_indirect);
- pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
- pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
- pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
- pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
- rx_ring->sbq_prod_idx_db_reg);
- pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
- pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
- pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
- pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
- pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
- pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
- pr_err("rx_ring->irq = %d\n", rx_ring->irq);
- pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
- pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
-}
-
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
-{
- void *ptr;
-
- pr_err("%s: Enter\n", __func__);
-
- ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL)
- return;
-
- if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
- pr_err("%s: Failed to upload control block!\n", __func__);
- goto fail_it;
- }
- switch (bit) {
- case CFG_DRQ:
- ql_dump_wqicb((struct wqicb *)ptr);
- break;
- case CFG_DCQ:
- ql_dump_cqicb((struct cqicb *)ptr);
- break;
- case CFG_DR:
- ql_dump_ricb((struct ricb *)ptr);
- break;
- default:
- pr_err("%s: Invalid bit value = %x\n", __func__, bit);
- break;
- }
-fail_it:
- kfree(ptr);
-}
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd)
-{
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
- tbd++;
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
- tbd++;
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
-
-}
-
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
-{
- struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
- (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
- struct tx_buf_desc *tbd;
- u16 frame_len;
-
- pr_err("%s\n", __func__);
- pr_err("opcode = %s\n",
- (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
- pr_err("flags1 = %s %s %s %s %s\n",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
- pr_err("flags2 = %s %s %s\n",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- pr_err("flags3 = %s %s %s\n",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
- pr_err("tid = %x\n", ob_mac_iocb->tid);
- pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
- pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
- if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
- pr_err("frame_len = %d\n",
- le32_to_cpu(ob_mac_tso_iocb->frame_len));
- pr_err("mss = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->mss));
- pr_err("prot_hdr_len = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
- pr_err("hdr_offset = 0x%.04x\n",
- le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
- frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
- } else {
- pr_err("frame_len = %d\n",
- le16_to_cpu(ob_mac_iocb->frame_len));
- frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
- }
- tbd = &ob_mac_iocb->tbd[0];
- ql_dump_tx_desc(tbd);
-}
-
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
-{
- pr_err("%s\n", __func__);
- pr_err("opcode = %d\n", ob_mac_rsp->opcode);
- pr_err("flags = %s %s %s %s %s %s %s\n",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
- ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
- pr_err("tid = %x\n", ob_mac_rsp->tid);
-}
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- pr_err("%s\n", __func__);
- pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
- pr_err("flags1 = %s%s%s%s%s%s\n",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
-
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
- pr_err("%s%s%s Multicast\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
-
- pr_err("flags2 = %s%s%s%s%s\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
-
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
- pr_err("%s%s%s%s%s error\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
-
- pr_err("flags3 = %s%s\n",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("RSS flags = %s%s%s%s\n",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
-
- pr_err("data_len = %d\n",
- le32_to_cpu(ib_mac_rsp->data_len));
- pr_err("data_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("rss = %x\n",
- le32_to_cpu(ib_mac_rsp->rss));
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
- pr_err("vlan_id = %x\n",
- le16_to_cpu(ib_mac_rsp->vlan_id));
-
- pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- pr_err("hdr length = %d\n",
- le32_to_cpu(ib_mac_rsp->hdr_len));
- pr_err("hdr addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
- }
-}
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev)
-{
- int i;
-
- QL_DUMP_REGS(qdev);
- QL_DUMP_QDEV(qdev);
- for (i = 0; i < qdev->tx_ring_count; i++) {
- QL_DUMP_TX_RING(&qdev->tx_ring[i]);
- QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
- }
- for (i = 0; i < qdev->rx_ring_count; i++) {
- QL_DUMP_RX_RING(&qdev->rx_ring[i]);
- QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
- }
-}
-#endif
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
deleted file mode 100644
index a6886cc5654c..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ /dev/null
@@ -1,735 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-
-
-#include "qlge.h"
-
-struct ql_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
-#define QL_OFF(m) offsetof(struct ql_adapter, m)
-
-static const struct ql_stats ql_gstrings_stats[] = {
- {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
- {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
- {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
- QL_OFF(nic_stats.tx_mcast_pkts)},
- {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
- QL_OFF(nic_stats.tx_bcast_pkts)},
- {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
- QL_OFF(nic_stats.tx_ucast_pkts)},
- {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
- QL_OFF(nic_stats.tx_ctl_pkts)},
- {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
- QL_OFF(nic_stats.tx_pause_pkts)},
- {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
- QL_OFF(nic_stats.tx_64_pkt)},
- {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
- QL_OFF(nic_stats.tx_65_to_127_pkt)},
- {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
- QL_OFF(nic_stats.tx_128_to_255_pkt)},
- {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
- QL_OFF(nic_stats.tx_256_511_pkt)},
- {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
- QL_OFF(nic_stats.tx_512_to_1023_pkt)},
- {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
- QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
- {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
- QL_OFF(nic_stats.tx_1519_to_max_pkt)},
- {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
- QL_OFF(nic_stats.tx_undersize_pkt)},
- {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
- QL_OFF(nic_stats.tx_oversize_pkt)},
- {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
- {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
- QL_OFF(nic_stats.rx_bytes_ok)},
- {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
- {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
- QL_OFF(nic_stats.rx_pkts_ok)},
- {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
- QL_OFF(nic_stats.rx_bcast_pkts)},
- {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
- QL_OFF(nic_stats.rx_mcast_pkts)},
- {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
- QL_OFF(nic_stats.rx_ucast_pkts)},
- {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
- QL_OFF(nic_stats.rx_undersize_pkts)},
- {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
- QL_OFF(nic_stats.rx_oversize_pkts)},
- {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
- QL_OFF(nic_stats.rx_jabber_pkts)},
- {"rx_undersize_fcerr_pkts",
- QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
- QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
- {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
- QL_OFF(nic_stats.rx_drop_events)},
- {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
- QL_OFF(nic_stats.rx_fcerr_pkts)},
- {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
- QL_OFF(nic_stats.rx_align_err)},
- {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
- QL_OFF(nic_stats.rx_symbol_err)},
- {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
- QL_OFF(nic_stats.rx_mac_err)},
- {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
- QL_OFF(nic_stats.rx_ctl_pkts)},
- {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
- QL_OFF(nic_stats.rx_pause_pkts)},
- {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
- QL_OFF(nic_stats.rx_64_pkts)},
- {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
- QL_OFF(nic_stats.rx_65_to_127_pkts)},
- {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
- QL_OFF(nic_stats.rx_128_255_pkts)},
- {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
- QL_OFF(nic_stats.rx_256_511_pkts)},
- {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
- QL_OFF(nic_stats.rx_512_to_1023_pkts)},
- {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
- QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
- {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
- QL_OFF(nic_stats.rx_1519_to_max_pkts)},
- {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
- QL_OFF(nic_stats.rx_len_err_pkts)},
- {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
- QL_OFF(nic_stats.rx_code_err)},
- {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
- QL_OFF(nic_stats.rx_oversize_err)},
- {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
- QL_OFF(nic_stats.rx_undersize_err)},
- {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
- QL_OFF(nic_stats.rx_preamble_err)},
- {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
- QL_OFF(nic_stats.rx_frame_len_err)},
- {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
- QL_OFF(nic_stats.rx_crc_err)},
- {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
- QL_OFF(nic_stats.rx_err_count)},
- {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
- QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
- {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
- QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
- {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
- QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
- {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
- QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
- {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
- QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
- {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
- QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
- {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
- QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
- {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
- QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
- {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
- QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
- {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
- QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
- {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
- QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
- {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
- QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
- {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
- QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
- {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
- QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
- {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
- QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
- {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
- QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
- {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
- QL_OFF(nic_stats.rx_nic_fifo_drop)},
-};
-
-static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
- "Loopback test (offline)"
-};
-#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
-#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
-#define QLGE_RCV_MAC_ERR_STATS 7
-
-static int ql_update_ring_coalescing(struct ql_adapter *qdev)
-{
- int i, status = 0;
- struct rx_ring *rx_ring;
- struct cqicb *cqicb;
-
- if (!netif_running(qdev->ndev))
- return status;
-
- /* Skip the default queue, and update the outbound handler
- * queues if they changed.
- */
- cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->tx_max_coalesced_frames) {
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->tx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-
- /* Update the inbound (RSS) handler queues if they changed. */
- cqicb = (struct cqicb *)&qdev->rx_ring[0];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->rx_max_coalesced_frames) {
- for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->rx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-exit:
- return status;
-}
-
-static void ql_update_stats(struct ql_adapter *qdev)
-{
- u32 i;
- u64 data;
- u64 *iter = &qdev->nic_stats.tx_pkts;
-
- spin_lock(&qdev->stats_lock);
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get xgmac sem.\n");
- goto quit;
- }
- /*
- * Get TX statistics.
- */
- for (i = 0x200; i < 0x280; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get RX statistics.
- */
- for (i = 0x300; i < 0x3d0; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
-	/* Skip over the receive mac error statistics */
- iter += QLGE_RCV_MAC_ERR_STATS;
-
- /*
- * Get Per-priority TX pause frame counter statistics.
- */
- for (i = 0x500; i < 0x540; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get Per-priority RX pause frame counter statistics.
- */
- for (i = 0x568; i < 0x5a8; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get RX NIC FIFO DROP statistics.
- */
- if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n", i);
- goto end;
- } else
- *iter = data;
-end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
-quit:
- spin_unlock(&qdev->stats_lock);
-
- QL_DUMP_STAT(qdev);
-}
-
-static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
- int index;
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (index = 0; index < QLGE_STATS_LEN; index++) {
- memcpy(buf + index * ETH_GSTRING_LEN,
- ql_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
- }
- break;
- }
-}
-
-static int ql_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return QLGE_TEST_LEN;
- case ETH_SS_STATS:
- return QLGE_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void
-ql_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int index, length;
-
- length = QLGE_STATS_LEN;
- ql_update_stats(qdev);
-
- for (index = 0; index < length; index++) {
- char *p = (char *)qdev +
- ql_gstrings_stats[index].stat_offset;
- *data++ = (ql_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
- }
-}
-
-static int ql_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
-
- if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
- STS_LINK_TYPE_10GBASET) {
- supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->base.port = PORT_TP;
- ecmd->base.autoneg = AUTONEG_ENABLE;
- } else {
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
- ecmd->base.port = PORT_FIBRE;
- }
-
- ecmd->base.speed = SPEED_10000;
- ecmd->base.duplex = DUPLEX_FULL;
-
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static void ql_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, qlge_driver_version,
- sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "v%d.%d.%d",
- (qdev->fw_rev_id & 0x00ff0000) >> 16,
- (qdev->fw_rev_id & 0x0000ff00) >> 8,
- (qdev->fw_rev_id & 0x000000ff));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
- /* WOL is only supported for mezz card. */
- if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
- ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = qdev->wol;
- }
-}
-
-static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
- /* WOL is only supported for mezz card. */
- if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
- ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
- netif_info(qdev, drv, qdev->ndev,
- "WOL is only supported for mezz card\n");
- return -EOPNOTSUPP;
- }
- if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
- qdev->wol = wol->wolopts;
-
- netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- return 0;
-}
-
-static int ql_set_phys_id(struct net_device *ndev,
- enum ethtool_phys_id_state state)
-
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Save the current LED settings */
- if (ql_mb_get_led_cfg(qdev))
- return -EIO;
-
- /* Start blinking */
- ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
- return 0;
-
- case ETHTOOL_ID_INACTIVE:
- /* Restore LED settings */
- if (ql_mb_set_led_cfg(qdev, qdev->led_config))
- return -EIO;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static int ql_start_loopback(struct ql_adapter *qdev)
-{
- if (netif_carrier_ok(qdev->ndev)) {
- set_bit(QL_LB_LINK_UP, &qdev->flags);
- netif_carrier_off(qdev->ndev);
- } else
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- qdev->link_config |= CFG_LOOPBACK_PCS;
- return ql_mb_set_port_cfg(qdev);
-}
-
-static void ql_stop_loopback(struct ql_adapter *qdev)
-{
- qdev->link_config &= ~CFG_LOOPBACK_PCS;
- ql_mb_set_port_cfg(qdev);
- if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
- netif_carrier_on(qdev->ndev);
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- }
-}
-
-static void ql_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
-}
-
-void ql_check_lb_frame(struct ql_adapter *qdev,
- struct sk_buff *skb)
-{
- unsigned int frame_size = skb->len;
-
- if ((*(skb->data + 3) == 0xFF) &&
- (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- atomic_dec(&qdev->lb_count);
- return;
- }
-}
-
-static int ql_run_loopback_test(struct ql_adapter *qdev)
-{
- int i;
- netdev_tx_t rc;
- struct sk_buff *skb;
- unsigned int size = SMALL_BUF_MAP_SIZE;
-
- for (i = 0; i < 64; i++) {
- skb = netdev_alloc_skb(qdev->ndev, size);
- if (!skb)
- return -ENOMEM;
-
- skb->queue_mapping = 0;
- skb_put(skb, size);
- ql_create_lb_frame(skb, size);
- rc = ql_lb_send(skb, qdev->ndev);
- if (rc != NETDEV_TX_OK)
- return -EPIPE;
- atomic_inc(&qdev->lb_count);
- }
- /* Give queue time to settle before testing results. */
- msleep(2);
- ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
- return atomic_read(&qdev->lb_count) ? -EIO : 0;
-}
-
-static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
-{
- *data = ql_start_loopback(qdev);
- if (*data)
- goto out;
- *data = ql_run_loopback_test(qdev);
-out:
- ql_stop_loopback(qdev);
- return *data;
-}
-
-static void ql_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
-
- if (netif_running(ndev)) {
- set_bit(QL_SELFTEST, &qdev->flags);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
- if (ql_loopback_test(qdev, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- } else {
- /* Online tests */
- data[0] = 0;
- }
- clear_bit(QL_SELFTEST, &qdev->flags);
- /* Give link time to come up after
- * port configuration changes.
- */
- msleep_interruptible(4 * 1000);
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "is down, Loopback test will fail.\n");
- eth_test->flags |= ETH_TEST_FL_FAILED;
- }
-}
-
-static int ql_get_regs_len(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- return sizeof(struct ql_mpi_coredump);
- else
- return sizeof(struct ql_reg_dump);
-}
-
-static void ql_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- ql_get_dump(qdev, p);
- qdev->core_is_dumped = 0;
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- regs->len = sizeof(struct ql_mpi_coredump);
- else
- regs->len = sizeof(struct ql_reg_dump);
-}
-
-static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
- struct ql_adapter *qdev = netdev_priv(dev);
-
- c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
- c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
-
- /* This chip coalesces as follows:
- * If a packet arrives, hold off interrupts until
- * cqicb->int_delay expires, but if no other packets arrive don't
- * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
- * timer to coalesce on a frame basis. So, we have to take ethtool's
- * max_coalesced_frames value and convert it to a delay in microseconds.
-	 * We do this by using a basic throughput of 1,000,000 frames per
- * second @ (1024 bytes). This means one frame per usec. So it's a
- * simple one to one ratio.
- */
- c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
- c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
-
- return 0;
-}
-
-static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- /* Validate user parameters. */
- if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
- return -EINVAL;
- /* Don't wait more than 10 usec. */
- if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
- if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
- return -EINVAL;
- if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
-
- /* Verify a change took place before updating the hardware. */
- if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
- qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
- qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
- qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
- return 0;
-
- qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
- qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
- qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
- qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
-
- return ql_update_ring_coalescing(qdev);
-}
-
-static void ql_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
-
- ql_mb_get_port_cfg(qdev);
- if (qdev->link_config & CFG_PAUSE_STD) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int ql_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
- int status = 0;
-
- if ((pause->rx_pause) && (pause->tx_pause))
- qdev->link_config |= CFG_PAUSE_STD;
- else if (!pause->rx_pause && !pause->tx_pause)
- qdev->link_config &= ~CFG_PAUSE_STD;
- else
- return -EINVAL;
-
- status = ql_mb_set_port_cfg(qdev);
- return status;
-}
-
-static u32 ql_get_msglevel(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- return qdev->msg_enable;
-}
-
-static void ql_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- qdev->msg_enable = value;
-}
-
-const struct ethtool_ops qlge_ethtool_ops = {
- .get_drvinfo = ql_get_drvinfo,
- .get_wol = ql_get_wol,
- .set_wol = ql_set_wol,
- .get_regs_len = ql_get_regs_len,
- .get_regs = ql_get_regs,
- .get_msglevel = ql_get_msglevel,
- .set_msglevel = ql_set_msglevel,
- .get_link = ethtool_op_get_link,
- .set_phys_id = ql_set_phys_id,
- .self_test = ql_self_test,
- .get_pauseparam = ql_get_pauseparam,
- .set_pauseparam = ql_set_pauseparam,
- .get_coalesce = ql_get_coalesce,
- .set_coalesce = ql_set_coalesce,
- .get_sset_count = ql_get_sset_count,
- .get_strings = ql_get_strings,
- .get_ethtool_stats = ql_get_ethtool_stats,
- .get_link_ksettings = ql_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
deleted file mode 100644
index 6cae33072496..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ /dev/null
@@ -1,5027 +0,0 @@
-/*
- * QLogic qlge NIC HBA Driver
- * Copyright (c) 2003-2008 QLogic Corporation
- * See LICENSE.qlge for copyright and licensing details.
- * Author: Linux qlge network device driver by
- * Ron Mercer <ron.mercer@qlogic.com>
- */
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/prefetch.h>
-#include <net/ip6_checksum.h>
-
-#include "qlge.h"
-
-char qlge_driver_name[] = DRV_NAME;
-const char qlge_driver_version[] = DRV_VERSION;
-
-MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
-MODULE_DESCRIPTION(DRV_STRING " ");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
-/* NETIF_MSG_TIMER | */
- NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP |
- NETIF_MSG_RX_ERR |
- NETIF_MSG_TX_ERR |
-/* NETIF_MSG_TX_QUEUED | */
-/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
-/* NETIF_MSG_PKTDATA | */
- NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-
-static int debug = -1; /* defaults above */
-module_param(debug, int, 0664);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-#define MSIX_IRQ 0
-#define MSI_IRQ 1
-#define LEG_IRQ 2
-static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, 0664);
-MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-
-static int qlge_mpi_coredump;
-module_param(qlge_mpi_coredump, int, 0);
-MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. "
- "Default is OFF - Do Not allocate memory. ");
-
-static int qlge_force_coredump;
-module_param(qlge_force_coredump, int, 0);
-MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow force of firmware core dump. "
- "Default is OFF - Do not allow.");
-
-static const struct pci_device_id qlge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
- /* required last entry */
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-
-static int ql_wol(struct ql_adapter *);
-static void qlge_set_multicast_list(struct net_device *);
-static int ql_adapter_down(struct ql_adapter *);
-static int ql_adapter_up(struct ql_adapter *);
-
-/* This hardware semaphore provides exclusive access to
- * resources shared between the NIC driver, MPI firmware,
- * FCOE firmware and the FC driver.
- */
-static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
-{
- u32 sem_bits = 0;
-
- switch (sem_mask) {
- case SEM_XGMAC0_MASK:
- sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
- break;
- case SEM_XGMAC1_MASK:
- sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
- break;
- case SEM_ICB_MASK:
- sem_bits = SEM_SET << SEM_ICB_SHIFT;
- break;
- case SEM_MAC_ADDR_MASK:
- sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
- break;
- case SEM_FLASH_MASK:
- sem_bits = SEM_SET << SEM_FLASH_SHIFT;
- break;
- case SEM_PROBE_MASK:
- sem_bits = SEM_SET << SEM_PROBE_SHIFT;
- break;
- case SEM_RT_IDX_MASK:
- sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
- break;
- case SEM_PROC_REG_MASK:
- sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
- break;
- default:
-		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
- return -EINVAL;
- }
-
- ql_write32(qdev, SEM, sem_bits | sem_mask);
- return !(ql_read32(qdev, SEM) & sem_bits);
-}
-
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
-{
- unsigned int wait_count = 30;
- do {
- if (!ql_sem_trylock(qdev, sem_mask))
- return 0;
- udelay(100);
- } while (--wait_count);
- return -ETIMEDOUT;
-}
-
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
-{
- ql_write32(qdev, SEM, sem_mask);
- ql_read32(qdev, SEM); /* flush */
-}
-
-/* This function waits for a specific bit to come ready
- * in a given register. It is used mostly by the initialize
- * process, but is also used in kernel thread API such as
- * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
- */
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
-{
- u32 temp;
- int count = UDELAY_COUNT;
-
- while (count) {
- temp = ql_read32(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit) {
- netif_alert(qdev, probe, qdev->ndev,
-				  "register 0x%.08x access error, value = 0x%.08x!\n",
- reg, temp);
- return -EIO;
- } else if (temp & bit)
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- netif_alert(qdev, probe, qdev->ndev,
- "Timed out waiting for reg %x to come ready.\n", reg);
- return -ETIMEDOUT;
-}
-
-/* The CFG register is used to download TX and RX control blocks
- * to the chip. This function waits for an operation to complete.
- */
-static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
-{
- int count = UDELAY_COUNT;
- u32 temp;
-
- while (count) {
- temp = ql_read32(qdev, CFG);
- if (temp & CFG_LE)
- return -EIO;
- if (!(temp & bit))
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- return -ETIMEDOUT;
-}
-
-
-/* Used to issue init control blocks to hw. Maps control block,
- * sets address, triggers download, waits for completion.
- */
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id)
-{
- u64 map;
- int status = 0;
- int direction;
- u32 mask;
- u32 value;
-
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
-
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
- return -ENOMEM;
- }
-
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status)
- return status;
-
- status = ql_wait_cfg(qdev, bit);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for CFG to come ready.\n");
- goto exit;
- }
-
- ql_write32(qdev, ICB_L, (u32) map);
- ql_write32(qdev, ICB_H, (u32) (map >> 32));
-
- mask = CFG_Q_MASK | (bit << 16);
- value = bit | (q_id << CFG_Q_SHIFT);
- ql_write32(qdev, CFG, (mask | value));
-
- /*
- * Wait for the bit to clear after signaling hw.
- */
- status = ql_wait_cfg(qdev, bit);
-exit:
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- pci_unmap_single(qdev->pdev, map, size, direction);
- return status;
-}
-
-/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value)
-{
- u32 offset = 0;
- int status;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- }
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
-exit:
- return status;
-}
-
-/* Set up a MAC, multicast or VLAN address for the
- * inbound frame matching.
- */
-static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
- u16 index)
-{
- u32 offset = 0;
- int status = 0;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | (addr[5]);
-
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
-
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- break;
- }
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower =
- (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
- (addr[5]);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- and possibly the function id. Right now we hardcode
- the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->
- func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- {
- u32 enable_bit = *((u32 *) &addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
- break;
- }
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
-exit:
- return status;
-}
-
-/* Set or clear MAC address in hardware. We sometimes
- * have to clear it to prevent wrong frame routing
- * especially in a bonding environment.
- */
-static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
-{
- int status;
- char zero_mac_addr[ETH_ALEN];
- char *addr;
-
- if (set) {
- addr = &qdev->current_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Set Mac addr %pM\n", addr);
- } else {
- eth_zero_addr(zero_mac_addr);
- addr = &zero_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Clearing MAC address\n");
- }
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init mac address.\n");
- return status;
-}
-
-void ql_link_on(struct ql_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is up.\n");
- netif_carrier_on(qdev->ndev);
- ql_set_mac_addr(qdev, 1);
-}
-
-void ql_link_off(struct ql_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is down.\n");
- netif_carrier_off(qdev->ndev);
- ql_set_mac_addr(qdev, 0);
-}
-
-/* Get a specific frame routing value from the CAM.
- * Used for debug and reg dump.
- */
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
-{
- int status = 0;
-
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
-
- ql_write32(qdev, RT_IDX,
- RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
- if (status)
- goto exit;
- *value = ql_read32(qdev, RT_DATA);
-exit:
- return status;
-}
-
-/* The NIC function for this chip has 16 routing indexes. Each one can be used
- * to route different frame types to various inbound queues. We send broadcast/
- * multicast/error frames to the default queue for slow handling,
- * and CAM hit/RSS frames to the fast handling queues.
- */
-static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
- int enable)
-{
- int status = -EINVAL; /* Return error if no mask match. */
- u32 value = 0;
-
- switch (mask) {
- case RT_IDX_CAM_HIT:
- {
- value = RT_IDX_DST_CAM_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_VALID: /* Promiscuous Mode frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_IP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST: /* Pass up All Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
- {
- value = RT_IDX_DST_RSS | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case 0: /* Clear the E-bit on an entry. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (index << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- default:
- netif_err(qdev, ifup, qdev->ndev,
- "Mask type %d not yet supported.\n", mask);
- status = -EPERM;
- goto exit;
- }
-
- if (value) {
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
- value |= (enable ? RT_IDX_E : 0);
- ql_write32(qdev, RT_IDX, value);
- ql_write32(qdev, RT_DATA, enable ? mask : 0);
- }
-exit:
- return status;
-}
-
-static void ql_enable_interrupts(struct ql_adapter *qdev)
-{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
-}
-
-static void ql_disable_interrupts(struct ql_adapter *qdev)
-{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
-}
-
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
-{
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
-		/* Always enable if we're using MSI-X multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
-
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
-}
-
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
-{
- u32 var = 0;
- struct intr_context *ctx;
-
-	/* HW disables for us if we're using MSI-X multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
-
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
-}
-
-static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
-{
- int i;
- for (i = 0; i < qdev->intr_count; i++) {
-		/* The enable call does an atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
-
-}
-
-static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
-{
- int status, i;
- u16 csum = 0;
- __le16 *flash = (__le16 *)&qdev->flash;
-
- status = strncmp((char *)&qdev->flash, str, 4);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
- return status;
- }
-
- for (i = 0; i < size; i++)
- csum += le16_to_cpu(*flash++);
-
- if (csum)
- netif_err(qdev, ifup, qdev->ndev,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
-
- return csum;
-}
-
-static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* This data is stored on flash as an array of
- * __le32. Since ql_read32() returns cpu endian
- * we need to swap it back.
- */
- *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
-exit:
- return status;
-}
-
-static int ql_get_8000_flash_params(struct ql_adapter *qdev)
-{
- u32 i, size;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset;
- u8 mac_addr[6];
-
- /* Get flash offset for function and adjust
- * for dword access.
- */
- if (!qdev->port)
- offset = FUNC0_FLASH_OFFSET / sizeof(u32);
- else
- offset = FUNC1_FLASH_OFFSET / sizeof(u32);
-
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- size = sizeof(struct flash_params_8000) / sizeof(u32);
- for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
-
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) / sizeof(u16),
- "8000");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- /* Extract either manufacturer or BOFM modified
- * MAC address.
- */
- if (qdev->flash.flash_params_8000.data_type1 == 2)
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
- else
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
-
- if (!is_valid_ether_addr(mac_addr)) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
- status = -EINVAL;
- goto exit;
- }
-
- memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
-
-exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-static int ql_get_8012_flash_params(struct ql_adapter *qdev)
-{
- int i;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset = 0;
- u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
-
- /* Second function's parameters follow the first
- * function's.
- */
- if (qdev->port)
- offset = size;
-
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
-
- }
-
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) / sizeof(u16),
- "8012");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
- status = -EINVAL;
- goto exit;
- }
-
- memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
-
-exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
-{
- int status;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- return status;
- /* write the data to the data reg */
- ql_write32(qdev, XGMAC_DATA, data);
- /* trigger the write */
- ql_write32(qdev, XGMAC_ADDR, reg);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* get the data */
- *data = ql_read32(qdev, XGMAC_DATA);
-exit:
- return status;
-}
-
-/* This is used for reading the 64-bit statistics regs. */
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
-{
- int status = 0;
- u32 hi = 0;
- u32 lo = 0;
-
- status = ql_read_xgmac_reg(qdev, reg, &lo);
- if (status)
- goto exit;
-
- status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
- if (status)
- goto exit;
-
- *data = (u64) lo | ((u64) hi << 32);
-
-exit:
- return status;
-}
-
-static int ql_8000_port_initialize(struct ql_adapter *qdev)
-{
- int status;
- /*
- * Get MPI firmware version for driver banner
-	 * and ethtool info.
- */
- status = ql_mb_about_fw(qdev);
- if (status)
- goto exit;
- status = ql_mb_get_fw_state(qdev);
- if (status)
- goto exit;
- /* Wake up a worker to get/set the TX/RX frame sizes. */
- queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
-exit:
- return status;
-}
-
-/* Take the MAC Core out of reset.
- * Enable statistics counting.
- * Take the transmitter/receiver out of reset.
- * This functionality may be done in the MPI firmware at a
- * later date.
- */
-static int ql_8012_port_initialize(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 data;
-
- if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
- /* Another function has the semaphore, so
- * wait for the port init bit to come ready.
- */
- netif_info(qdev, link, qdev->ndev,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
- status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
- if (status) {
- netif_crit(qdev, link, qdev->ndev,
- "Port initialize timed out.\n");
- }
- return status;
- }
-
-	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
- /* Set the core reset. */
- status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
- if (status)
- goto end;
- data |= GLOBAL_CFG_RESET;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
- /* Clear the core reset and turn on jumbo for receiver. */
- data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
- data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
- data |= GLOBAL_CFG_TX_STAT_EN;
- data |= GLOBAL_CFG_RX_STAT_EN;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
-	/* Enable the transmitter and clear its reset. */
- status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
- if (status)
- goto end;
- data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
- data |= TX_CFG_EN; /* Enable the transmitter. */
- status = ql_write_xgmac_reg(qdev, TX_CFG, data);
- if (status)
- goto end;
-
-	/* Enable the receiver and clear its reset. */
- status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
- if (status)
- goto end;
- data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
- data |= RX_CFG_EN; /* Enable the receiver. */
- status = ql_write_xgmac_reg(qdev, RX_CFG, data);
- if (status)
- goto end;
-
- /* Turn on jumbo. */
- status =
- ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
- if (status)
- goto end;
- status =
- ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
- if (status)
- goto end;
-
- /* Signal to the world that the port is enabled. */
- ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
-end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
- return status;
-}
-
-static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
-{
- return PAGE_SIZE << qdev->lbq_buf_order;
-}
-
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
-}
-
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
-
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
-
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
-}
-
-/* Get the next small buffer. */
-static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
-}
-
-/* Update an rx ring index. */
-static void ql_update_cq(struct rx_ring *rx_ring)
-{
- rx_ring->cnsmr_idx++;
- rx_ring->curr_entry++;
- if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
- }
-}
-
-static void ql_write_cq_idx(struct rx_ring *rx_ring)
-{
- ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
-}
-
-static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
-{
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
- netif_err(qdev, drv, qdev->ndev,
- "PCI mapping failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
- }
-
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
-
- /* Adjust the master page chunk for next
- * buffer get.
- */
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
- } else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
- return 0;
-}
-/* Process (refill) a large buffer queue. */
-static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk, i=%d, clean_idx =%d .\n",
- i, clean_idx);
- return;
- }
-
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
-
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
- }
-
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
-
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
- }
-}
-
-/* Process (refill) a small buffer queue. */
-static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
-
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
-
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
-}
-
-static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
-}
-
-/* Unmaps tx buffers. Can be called from send() if a pci mapping
- * fails at some stage, or from the interrupt when a tx completes.
- */
-static void ql_unmap_send(struct ql_adapter *qdev,
- struct tx_ring_desc *tx_ring_desc, int mapped)
-{
- int i;
- for (i = 0; i < mapped; i++) {
- if (i == 0 || (i == 7 && mapped > 7)) {
- /*
- * Unmap the skb->data area, or the
- * external sglist (AKA the Outbound
- * Address List (OAL)).
-			 * If it's the zeroeth element, then it's
-			 * the skb->data area. If it's the 7th
-			 * element and there are more than 6 frags,
-			 * then it's an OAL.
- */
- if (i == 7) {
- netif_printk(qdev, tx_done, KERN_DEBUG,
- qdev->ndev,
- "unmapping OAL area.\n");
- }
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen),
- PCI_DMA_TODEVICE);
- } else {
- netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
- "unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
- }
- }
-
-}
-
-/* Map the buffers for this transmit. This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
- */
-static int ql_map_send(struct ql_adapter *qdev,
- struct ob_mac_iocb_req *mac_iocb_ptr,
- struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
-{
- int len = skb_headlen(skb);
- dma_addr_t map;
- int frag_idx, err, map_idx = 0;
- struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
- int frag_cnt = skb_shinfo(skb)->nr_frags;
-
- if (frag_cnt) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "frag_cnt = %d.\n", frag_cnt);
- }
- /*
- * Map the skb buffer first.
- */
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
-
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping failed with error: %d\n", err);
-
- return NETDEV_TX_BUSY;
- }
-
- tbd->len = cpu_to_le32(len);
- tbd->addr = cpu_to_le64(map);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
- map_idx++;
-
- /*
- * This loop fills the remainder of the 8 address descriptors
- * in the IOCB. If there are more than 7 fragments, then the
- * eighth address desc will point to an external list (OAL).
- * When this happens, the remainder of the frags will be stored
- * in this list.
- */
- for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
- tbd++;
- if (frag_idx == 6 && frag_cnt > 7) {
- /* Let's tack on an sglist.
- * Our control block will now
- * look like this:
- * iocb->seg[0] = skb->data
- * iocb->seg[1] = frag[0]
- * iocb->seg[2] = frag[1]
- * iocb->seg[3] = frag[2]
- * iocb->seg[4] = frag[3]
- * iocb->seg[5] = frag[4]
- * iocb->seg[6] = frag[5]
- * iocb->seg[7] = ptr to OAL (external sglist)
- * oal->seg[0] = frag[6]
- * oal->seg[1] = frag[7]
- * oal->seg[2] = frag[8]
- * oal->seg[3] = frag[9]
- * oal->seg[4] = frag[10]
- * etc...
- */
- /* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
- sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping outbound address list with error: %d\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- /*
-			 * The length is the number of fragments
-			 * that remain to be mapped times the size
-			 * of one descriptor in our sglist (OAL).
- */
- tbd->len =
- cpu_to_le32((sizeof(struct tx_buf_desc) *
- (frag_cnt - frag_idx)) | TX_DESC_C);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
- map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- sizeof(struct oal));
- tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
- map_idx++;
- }
-
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping frags failed with error: %d.\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(skb_frag_size(frag));
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- skb_frag_size(frag));
-
- }
- /* Save the number of segments we've mapped. */
- tx_ring_desc->map_cnt = map_idx;
- /* Terminate the last segment. */
- tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
- return NETDEV_TX_OK;
-
-map_error:
- /*
- * If the first frag mapping failed, then i will be zero.
- * This causes the unmap of the skb->data area. Otherwise
- * we pass in the number of frags that mapped successfully
-	 * so they can be unmapped.
- */
- ql_unmap_send(qdev, tx_ring_desc, map_idx);
- return NETDEV_TX_BUSY;
-}
-
-/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
-{
- struct nic_stats *stats = &qdev->nic_stats;
-
- stats->rx_err_count++;
- rx_ring->rx_errors++;
-
- switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
- case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
- stats->rx_code_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
- stats->rx_oversize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
- stats->rx_undersize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
- stats->rx_preamble_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
- stats->rx_frame_len_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_CRC:
- stats->rx_crc_err++;
- default:
- break;
- }
-}
-
-/**
- * ql_update_mac_hdr_len - helper routine to update the mac header length
- * based on vlan tags if present
- */
-static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- void *page, size_t *len)
-{
- u16 *tags;
-
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- return;
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
- tags = (u16 *)page;
- /* Look for stacked vlan tags in ethertype field */
- if (tags[6] == ETH_P_8021Q &&
- tags[8] == ETH_P_8021Q)
- *len += 2 * VLAN_HLEN;
- else
- *len += VLAN_HLEN;
- }
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- napi->dev = qdev->ndev;
-
- skb = napi_get_frags(napi);
- if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, exiting.\n");
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- prefetch(lbq_desc->p.pg_chunk.va);
- __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
-
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- skb_shinfo(skb)->nr_frags++;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += length;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- napi_gro_frags(napi);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
- size_t hlen = ETH_HLEN;
-
- skb = netdev_alloc_skb(ndev, length);
- if (!skb) {
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
-
- addr = lbq_desc->p.pg_chunk.va;
- prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- goto err_out;
- }
-
- /* Update the MAC header length*/
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + hlen) {
- netif_err(qdev, drv, qdev->ndev,
- "Segment too small, dropping.\n");
- rx_ring->rx_dropped++;
- goto err_out;
- }
- skb_put_data(skb, addr, hlen);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen,
- length - hlen);
- skb->len += length - hlen;
- skb->data_len += length - hlen;
- skb->truesize += length - hlen;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph =
- (struct iphdr *)((u8 *)addr + hlen);
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(napi, skb);
- else
- netif_receive_skb(skb);
- return;
-err_out:
- dev_kfree_skb_any(skb);
- put_page(lbq_desc->p.pg_chunk.page);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
-
- skb = sbq_desc->p.skb;
- /* Allocate new_skb and copy */
- new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
- rx_ring->rx_dropped++;
- return;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
-
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
-
- skb_put_data(new_skb, skb->data, length);
-
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = new_skb;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-static void ql_realign_skb(struct sk_buff *skb, int len)
-{
- void *temp_addr = skb->data;
-
- /* Undo the skb_reserve(skb,32) we did before
- * giving to hardware, and realign data on
- * a 2-byte boundary.
- */
- skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
- skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
- memmove(skb->data, temp_addr, len);
-}
-
-/*
- * This function builds an skb for the given inbound
- * completion. It will be rewritten for readability in the near
- * future, but for now it works well.
- */
-static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
- size_t hlen = ETH_HLEN;
-
- /*
- * Handle the header buffer if present.
- */
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header of %d bytes in small buffer.\n", hdr_len);
- /*
- * Headers fit nicely into a small buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = sbq_desc->p.skb;
- ql_realign_skb(skb, hdr_len);
- skb_put(skb, hdr_len);
- sbq_desc->p.skb = NULL;
- }
-
- /*
- * Handle the data buffer(s).
- */
- if (unlikely(!length)) { /* Is there data too? */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No Data buffer in this packet.\n");
- return skb;
- }
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Headers in small, data of %d bytes in small, combine them.\n",
- length);
- /*
- * Data is less than small buffer size so it's
- * stuffed in a small buffer.
- * For this case we append the data
- * from the "data" small buffer to the "header" small
- * buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb_put_data(skb, sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),
- dma_unmap_len
- (sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
- } else {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes in a single small buffer.\n",
- length);
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- skb = sbq_desc->p.skb;
- ql_realign_skb(skb, length);
- skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc,
- mapaddr),
- dma_unmap_len(sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
- sbq_desc->p.skb = NULL;
- }
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header in small, %d bytes in large. Chain large to small!\n",
- length);
- /*
- * The data is in a single large buffer. We
- * chain it to the header buffer's skb and let
- * it rip.
- */
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Chaining page at offset = %d, for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- } else {
- /*
- * The headers and data are in a single large buffer. We
- * copy it to a new skb and let it go. This can happen with
- * jumbo mtu on a non-TCP/UDP frame.
- */
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- skb = netdev_alloc_skb(qdev->ndev, length);
- if (skb == NULL) {
- netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
- "No skb available, drop the packet.\n");
- return NULL;
- }
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(lbq_desc,
- mapaddr),
- dma_unmap_len(lbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb_reserve(skb, NET_IP_ALIGN);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- ql_update_mac_hdr_len(qdev, ib_mac_rsp,
- lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- } else {
- /*
- * The data is in a chain of large buffers
- * pointed to by a small buffer. We loop
-		 * thru and chain them to our small header
- * buffer's skb.
- * frags: There are 18 max frags and our small
- * buffer will hold 32 of them. The thing is,
- * we'll use 3 max for our 9000 byte jumbo
- * frames. If the MTU goes up we could
- * eventually be in trouble.
- */
- int size, i = 0;
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
- /*
-			 * This is a non-TCP/UDP IP frame, so
- * the headers aren't split into a small
- * buffer. We have to use the small buffer
- * that contains our sg list as our skb to
- * send upstairs. Copy the sg list here to
- * a local buffer and use it to find the
- * pages to chain.
- */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers & data in chain of large.\n",
- length);
- skb = sbq_desc->p.skb;
- sbq_desc->p.skb = NULL;
- skb_reserve(skb, NET_IP_ALIGN);
- }
- do {
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- size = (length < rx_ring->lbq_buf_size) ? length :
- rx_ring->lbq_buf_size;
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
- skb_fill_page_desc(skb, i,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
- length -= size;
- i++;
- } while (length > 0);
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- return skb;
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
-
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
- skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
- if (unlikely(!skb)) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No skb available, drop packet.\n");
- rx_ring->rx_dropped++;
- return;
- }
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- rx_ring->rx_multicast++;
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
- }
-
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- }
- }
- }
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
- ((le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- /* The data and headers are split into
- * separate buffers.
- */
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- /* The data fit in a single small buffer.
- * Allocate a new skb, copy the data and
- * return the buffer to the free pool.
- */
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
- /* TCP packet in a page chunk that's been checksummed.
- * Tack it on to our GRO skb and let it go.
- */
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- /* Non-TCP packet in a page chunk. Allocate an
- * skb, tack it on frags, and send it up.
- */
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else {
- /* Non-TCP/UDP large frames that span multiple buffers
-		 * can be processed correctly by the split frame logic.
- */
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- }
-
- return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring. */
-static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
- struct ob_mac_iocb_rsp *mac_rsp)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
-
- QL_DUMP_OB_MAC_RSP(mac_rsp);
- tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
- tx_ring_desc = &tx_ring->q[mac_rsp->tid];
- ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
- tx_ring->tx_packets++;
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
-
- if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
- OB_MAC_IOCB_RSP_S |
- OB_MAC_IOCB_RSP_L |
- OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Total descriptor length did not match transfer length.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too short to be valid, not sent.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too long, but sent anyway.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "PCI backplane error. Frame not sent.\n");
- }
- }
- atomic_inc(&tx_ring->tx_count);
-}
-
-/* Fire up a handler to reset the MPI processor. */
-void ql_queue_fw_error(struct ql_adapter *qdev)
-{
- ql_link_off(qdev);
- queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
-}
-
-void ql_queue_asic_error(struct ql_adapter *qdev)
-{
- ql_link_off(qdev);
- ql_disable_interrupts(qdev);
- /* Clear adapter up bit to signal the recovery
- * process that it shouldn't kill the reset worker
- * thread
- */
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
-	/* Set the asic recovery bit to indicate to the reset process
-	 * that we are in fatal error recovery rather than a normal close
-	 */
- set_bit(QL_ASIC_RECOVERY, &qdev->flags);
- queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
-}
-
-static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
- struct ib_ae_iocb_rsp *ib_ae_rsp)
-{
- switch (ib_ae_rsp->event) {
- case MGMT_ERR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Management Processor Fatal Error.\n");
- ql_queue_fw_error(qdev);
- return;
-
- case CAM_LOOKUP_ERR_EVENT:
- netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
- netdev_err(qdev->ndev, "This event shouldn't occur.\n");
- ql_queue_asic_error(qdev);
- return;
-
- case SOFT_ECC_ERROR_EVENT:
- netdev_err(qdev->ndev, "Soft ECC error detected.\n");
- ql_queue_asic_error(qdev);
- break;
-
- case PCI_ERR_ANON_BUF_RD:
- netdev_err(qdev->ndev, "PCI error occurred when reading "
- "anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
- ql_queue_asic_error(qdev);
- break;
-
- default:
- netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
- ib_ae_rsp->event);
- ql_queue_asic_error(qdev);
- break;
- }
-}
-
-static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ob_mac_iocb_rsp *net_rsp = NULL;
- int count = 0;
-
- struct tx_ring *tx_ring;
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
-
- case OPCODE_OB_MAC_TSO_IOCB:
- case OPCODE_OB_MAC_IOCB:
- ql_process_mac_tx_intr(qdev, net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
- count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- }
- if (!net_rsp)
- return 0;
- ql_write_cq_idx(rx_ring);
- tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
- if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
-
- return count;
-}
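/*
 * Illustrative sketch, not part of the original patch: the wake-up
 * threshold used above. Assuming a TX work queue length (wq_len) of 256
 * descriptors, the stopped subqueue is woken once more than
 * wq_len / 4 == 64 descriptors are free again.
 */
#include <stdio.h>

int main(void)
{
	const int wq_len = 256;	/* assumed tx_ring->wq_len */
	int tx_count;		/* free descriptors, as in tx_ring->tx_count */

	for (tx_count = 63; tx_count <= 66; tx_count++)
		printf("tx_count = %d -> %s\n", tx_count,
		       tx_count > wq_len / 4 ? "wake subqueue" : "stay stopped");
	return 0;
}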
-
-static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ql_net_rsp_iocb *net_rsp;
- int count = 0;
-
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
- case OPCODE_IB_MAC_IOCB:
- ql_process_mac_rx_intr(qdev, rx_ring,
- (struct ib_mac_iocb_rsp *)
- net_rsp);
- break;
-
- case OPCODE_IB_AE_IOCB:
- ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
- net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- break;
- }
- count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- if (count == budget)
- break;
- }
- ql_update_buffer_queues(qdev, rx_ring);
- ql_write_cq_idx(rx_ring);
- return count;
-}
-
-static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
-{
- struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
- struct ql_adapter *qdev = rx_ring->qdev;
- struct rx_ring *trx_ring;
- int i, work_done = 0;
- struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
-
- /* Service the TX rings first. They start
- * right after the RSS rings. */
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- trx_ring = &qdev->rx_ring[i];
- /* If this TX completion ring belongs to this vector and
- * it's not empty then service it.
- */
- if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
- trx_ring->cnsmr_idx)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
- ql_clean_outbound_rx_ring(trx_ring);
- }
- }
-
- /*
- * Now service the RSS ring if it's active.
- */
- if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
- rx_ring->cnsmr_idx) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
- work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
- }
-
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- ql_enable_completion_interrupt(qdev, rx_ring->irq);
- }
- return work_done;
-}
-
-static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
- } else {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
- }
-}
-
-/**
- * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
- * based on the features to enable/disable hardware vlan accel
- */
-static int qlge_update_hw_vlan_features(struct net_device *ndev,
- netdev_features_t features)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status = 0;
- bool need_restart = netif_running(ndev);
-
- if (need_restart) {
- status = ql_adapter_down(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring down the adapter\n");
- return status;
- }
- }
-
-	/* update the features with the recent change */
- ndev->features = features;
-
- if (need_restart) {
- status = ql_adapter_up(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring up the adapter\n");
- return status;
- }
- }
-
- return status;
-}
-
-static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- netdev_features_t changed = ndev->features ^ features;
- int err;
-
- if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- qlge_vlan_mode(ndev, features);
- }
-
- return 0;
-}
-
-static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
-{
- u32 enable_bit = MAC_ADDR_E;
- int err;
-
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
- int err;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_add_vid(qdev, vid);
- set_bit(vid, qdev->active_vlans);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
-{
- u32 enable_bit = 0;
- int err;
-
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to clear vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
- int err;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_kill_vid(qdev, vid);
- clear_bit(vid, qdev->active_vlans);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static void qlge_restore_vlan(struct ql_adapter *qdev)
-{
- int status;
- u16 vid;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return;
-
- for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
- __qlge_vlan_rx_add_vid(qdev, vid);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
-static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
- napi_schedule(&rx_ring->napi);
- return IRQ_HANDLED;
-}
-
-/* This handles a fatal error, MPI activity, and the default
- * rx_ring in an MSI-X multiple vector environment.
- * In an MSI/legacy environment it also processes the rest of
- * the rx_rings.
- */
-static irqreturn_t qlge_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
- struct ql_adapter *qdev = rx_ring->qdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
- u32 var;
- int work_done = 0;
-
- spin_lock(&qdev->hw_lock);
- if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "Shared Interrupt, Not ours!\n");
- spin_unlock(&qdev->hw_lock);
- return IRQ_NONE;
- }
- spin_unlock(&qdev->hw_lock);
-
- var = ql_disable_completion_interrupt(qdev, intr_context->intr);
-
- /*
- * Check for fatal error.
- */
- if (var & STS_FE) {
- ql_queue_asic_error(qdev);
- netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
- var = ql_read32(qdev, ERR_STS);
- netdev_err(qdev->ndev, "Resetting chip. "
- "Error Status Register = 0x%x\n", var);
- return IRQ_HANDLED;
- }
-
- /*
- * Check MPI processor activity.
- */
- if ((var & STS_PI) &&
- (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
- /*
- * We've got an async event or mailbox completion.
- * Handle it and clear the source of the interrupt.
- */
- netif_err(qdev, intr, qdev->ndev,
- "Got MPI processor interrupt.\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work_on(smp_processor_id(),
- qdev->workqueue, &qdev->mpi_work, 0);
- work_done++;
- }
-
- /*
- * Get the bit-mask that shows the active queues for this
- * pass. Compare it to the queues that this irq services
- * and call napi if there's a match.
- */
- var = ql_read32(qdev, ISR1);
- if (var & intr_context->irq_mask) {
- netif_info(qdev, intr, qdev->ndev,
- "Waking handler for rx_ring[0].\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
- napi_schedule(&rx_ring->napi);
- work_done++;
- }
- ql_enable_completion_interrupt(qdev, intr_context->intr);
- return work_done ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
-
- if (skb_is_gso(skb)) {
- int err;
- __be16 l3_proto = vlan_get_protocol(skb);
-
- err = skb_cow_head(skb, 0);
- if (err < 0)
- return err;
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb)
- << OB_MAC_TRANSPORT_HDR_SHIFT);
- mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(l3_proto == htons(ETH_P_IP))) {
- struct iphdr *iph = ip_hdr(skb);
- iph->check = 0;
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (l3_proto == htons(ETH_P_IPV6)) {
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
- return 1;
- }
- return 0;
-}
-
-static void ql_hw_csum_setup(struct sk_buff *skb,
- struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
- int len;
- struct iphdr *iph = ip_hdr(skb);
- __sum16 *check;
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
-
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- len = (ntohs(iph->tot_len) - (iph->ihl << 2));
- if (likely(iph->protocol == IPPROTO_TCP)) {
- check = &(tcp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- (tcp_hdr(skb)->doff << 2));
- } else {
- check = &(udp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- sizeof(struct udphdr));
- }
- *check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, len, iph->protocol, 0);
-}
-
-static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
-{
- struct tx_ring_desc *tx_ring_desc;
- struct ob_mac_iocb_req *mac_iocb_ptr;
- struct ql_adapter *qdev = netdev_priv(ndev);
- int tso;
- struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32) skb->queue_mapping;
-
- tx_ring = &qdev->tx_ring[tx_ring_idx];
-
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_info(qdev, tx_queued, qdev->ndev,
- "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
- __func__, tx_ring_idx);
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
- tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
- mac_iocb_ptr = tx_ring_desc->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
- mac_iocb_ptr->tid = tx_ring_desc->index;
- /* We use the upper 32-bits to store the tx queue for this IO.
- * When we get the completion we can use it to establish the context.
- */
- mac_iocb_ptr->txq_idx = tx_ring_idx;
- tx_ring_desc->skb = skb;
-
- mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
-
- if (skb_vlan_tag_present(skb)) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
- mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
- mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
- }
- tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
- ql_hw_csum_setup(skb,
- (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
- }
- if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
- NETDEV_TX_OK) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "Could not map the segments.\n");
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
- QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
- tx_ring->prod_idx++;
- if (tx_ring->prod_idx == tx_ring->wq_len)
- tx_ring->prod_idx = 0;
- wmb();
-
- ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
-
- atomic_dec(&tx_ring->tx_count);
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
- return NETDEV_TX_OK;
-}
-
-
-static void ql_free_shadow_space(struct ql_adapter *qdev)
-{
- if (qdev->rx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- qdev->rx_ring_shadow_reg_area = NULL;
- }
- if (qdev->tx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
- qdev->tx_ring_shadow_reg_area = NULL;
- }
-}
-
-static int ql_alloc_shadow_space(struct ql_adapter *qdev)
-{
- qdev->rx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma);
- if (qdev->rx_ring_shadow_reg_area == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of RX shadow space failed.\n");
- return -ENOMEM;
- }
-
- qdev->tx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
- if (qdev->tx_ring_shadow_reg_area == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of TX shadow space failed.\n");
- goto err_wqp_sh_area;
- }
- return 0;
-
-err_wqp_sh_area:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- return -ENOMEM;
-}
-
-static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct tx_ring_desc *tx_ring_desc;
- int i;
- struct ob_mac_iocb_req *mac_iocb_ptr;
-
- mac_iocb_ptr = tx_ring->wq_base;
- tx_ring_desc = tx_ring->q;
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc->index = i;
- tx_ring_desc->skb = NULL;
- tx_ring_desc->queue_entry = mac_iocb_ptr;
- mac_iocb_ptr++;
- tx_ring_desc++;
- }
- atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
-}
-
-static void ql_free_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- if (tx_ring->wq_base) {
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
- }
- kfree(tx_ring->q);
- tx_ring->q = NULL;
-}
-
-static int ql_alloc_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- tx_ring->wq_base =
- pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
- &tx_ring->wq_base_dma);
-
- if ((tx_ring->wq_base == NULL) ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
- goto pci_alloc_err;
-
- tx_ring->q =
- kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
- GFP_KERNEL);
- if (tx_ring->q == NULL)
- goto err;
-
- return 0;
-err:
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
-pci_alloc_err:
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
-}
-
-static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- struct bq_desc *lbq_desc;
-
- uint32_t curr_idx, clean_idx;
-
- curr_idx = rx_ring->lbq_curr_idx;
- clean_idx = rx_ring->lbq_clean_idx;
- while (curr_idx != clean_idx) {
- lbq_desc = &rx_ring->lbq[curr_idx];
-
- if (lbq_desc->p.pg_chunk.last_flag) {
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
-
- put_page(lbq_desc->p.pg_chunk.page);
- lbq_desc->p.pg_chunk.page = NULL;
-
- if (++curr_idx == rx_ring->lbq_len)
- curr_idx = 0;
-
- }
- if (rx_ring->pg_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
- put_page(rx_ring->pg_chunk.page);
- rx_ring->pg_chunk.page = NULL;
- }
-}
-
-static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
-
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- if (sbq_desc == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "sbq_desc %d is NULL.\n", i);
- return;
- }
- if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- }
- }
-}
-
-/* Free all large and small rx buffers associated
- * with the completion queues for this device.
- */
-static void ql_free_rx_buffers(struct ql_adapter *qdev)
-{
- int i;
- struct rx_ring *rx_ring;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->lbq)
- ql_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq)
- ql_free_sbq_buffers(qdev, rx_ring);
- }
-}
-
-static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
-{
- struct rx_ring *rx_ring;
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->type != TX_Q)
- ql_update_buffer_queues(qdev, rx_ring);
- }
-}
-
-static void ql_init_lbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *lbq_desc;
- __le64 *bq = rx_ring->lbq_base;
-
- memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(*lbq_desc));
- lbq_desc->index = i;
- lbq_desc->addr = bq;
- bq++;
- }
-}
-
-static void ql_init_sbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
- __le64 *bq = rx_ring->sbq_base;
-
- memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(*sbq_desc));
- sbq_desc->index = i;
- sbq_desc->addr = bq;
- bq++;
- }
-}
-
-static void ql_free_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- /* Free the small buffer queue. */
- if (rx_ring->sbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq_size,
- rx_ring->sbq_base, rx_ring->sbq_base_dma);
- rx_ring->sbq_base = NULL;
- }
-
- /* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq);
- rx_ring->sbq = NULL;
-
- /* Free the large buffer queue. */
- if (rx_ring->lbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq_size,
- rx_ring->lbq_base, rx_ring->lbq_base_dma);
- rx_ring->lbq_base = NULL;
- }
-
- /* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq);
- rx_ring->lbq = NULL;
-
- /* Free the rx queue. */
- if (rx_ring->cq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
- rx_ring->cq_base = NULL;
- }
-}
-
-/* Allocate queues and buffers for this completion queue based
- * on the values in the parameter structure. */
-static int ql_alloc_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
-
- /*
- * Allocate the completion queue for this rx_ring.
- */
- rx_ring->cq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
- &rx_ring->cq_base_dma);
-
- if (rx_ring->cq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
- return -ENOMEM;
- }
-
- if (rx_ring->sbq_len) {
- /*
- * Allocate small buffer queue.
- */
- rx_ring->sbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
- &rx_ring->sbq_base_dma);
-
- if (rx_ring->sbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue allocation failed.\n");
- goto err_mem;
- }
-
- /*
- * Allocate small buffer queue control blocks.
- */
- rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL)
- goto err_mem;
-
- ql_init_sbq_ring(qdev, rx_ring);
- }
-
- if (rx_ring->lbq_len) {
- /*
- * Allocate large buffer queue.
- */
- rx_ring->lbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
- &rx_ring->lbq_base_dma);
-
- if (rx_ring->lbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue allocation failed.\n");
- goto err_mem;
- }
- /*
- * Allocate large buffer queue control blocks.
- */
- rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL)
- goto err_mem;
-
- ql_init_lbq_ring(qdev, rx_ring);
- }
-
- return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
-}
-
-static void ql_tx_ring_clean(struct ql_adapter *qdev)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
- int i, j;
-
- /*
- * Loop through all queues and free
- * any resources.
- */
- for (j = 0; j < qdev->tx_ring_count; j++) {
- tx_ring = &qdev->tx_ring[j];
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc = &tx_ring->q[i];
- if (tx_ring_desc && tx_ring_desc->skb) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
- ql_unmap_send(qdev, tx_ring_desc,
- tx_ring_desc->map_cnt);
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
- }
- }
- }
-}
-
-static void ql_free_mem_resources(struct ql_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->tx_ring_count; i++)
- ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
- for (i = 0; i < qdev->rx_ring_count; i++)
- ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
- ql_free_shadow_space(qdev);
-}
-
-static int ql_alloc_mem_resources(struct ql_adapter *qdev)
-{
- int i;
-
- /* Allocate space for our shadow registers and such. */
- if (ql_alloc_shadow_space(qdev))
- return -ENOMEM;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "RX resource allocation failed.\n");
- goto err_mem;
- }
- }
- /* Allocate tx queue resources */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "TX resource allocation failed.\n");
- goto err_mem;
- }
- }
- return 0;
-
-err_mem:
- ql_free_mem_resources(qdev);
- return -ENOMEM;
-}
-
-/* Set up the rx ring control block and pass it to the chip.
- * The control block is defined as
- * "Completion Queue Initialization Control Block", or cqicb.
- */
-static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- struct cqicb *cqicb = &rx_ring->cqicb;
- void *shadow_reg = qdev->rx_ring_shadow_reg_area +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
- int err = 0;
- u16 bq_len;
- u64 tmp;
- __le64 *base_indirect_ptr;
- int page_entries;
-
- /* Set up the shadow registers for this ring. */
- rx_ring->prod_idx_sh_reg = shadow_reg;
- rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
- *rx_ring->prod_idx_sh_reg = 0;
- shadow_reg += sizeof(u64);
- shadow_reg_dma += sizeof(u64);
- rx_ring->lbq_base_indirect = shadow_reg;
- rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- rx_ring->sbq_base_indirect = shadow_reg;
- rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
-
- /* PCI doorbell mem area + 0x00 for consumer index register */
- rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
-
- /* PCI doorbell mem area + 0x04 for valid register */
- rx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
-
- /* PCI doorbell mem area + 0x1c */
- rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
-
- memset((void *)cqicb, 0, sizeof(struct cqicb));
- cqicb->msix_vect = rx_ring->irq;
-
- bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
- cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
-
- cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
-
- cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
-
- /*
- * Set up the control block load flags.
- */
- cqicb->flags = FLAGS_LC | /* Load queue base address */
- FLAGS_LV | /* Load MSI-X vector */
- FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq_len) {
- cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = rx_ring->lbq_base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- cqicb->lbq_addr =
- cpu_to_le64(rx_ring->lbq_base_indirect_dma);
- bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
- (u16) rx_ring->lbq_buf_size;
- cqicb->lbq_buf_size = cpu_to_le16(bq_len);
- bq_len = (rx_ring->lbq_len == 65536) ? 0 :
- (u16) rx_ring->lbq_len;
- cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = 0;
- rx_ring->lbq_free_cnt = rx_ring->lbq_len;
- }
- if (rx_ring->sbq_len) {
- cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = rx_ring->sbq_base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
- cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq_base_indirect_dma);
- cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size));
- bq_len = (rx_ring->sbq_len == 65536) ? 0 :
- (u16) rx_ring->sbq_len;
- cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = 0;
- rx_ring->sbq_free_cnt = rx_ring->sbq_len;
- }
- switch (rx_ring->type) {
- case TX_Q:
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- break;
- case RX_Q:
- /* Inbound completion handling rx_rings run in
- * separate NAPI contexts.
- */
- netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
- 64);
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- break;
- default:
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Invalid rx_ring->type = %d.\n", rx_ring->type);
- }
- err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
- return err;
- }
- return err;
-}
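/*
 * Illustrative sketch, not part of the original patch: the queue length
 * fields loaded into the CQICB above are 16 bits wide, so a length of
 * 65536 is encoded as 0 (the hardware presumably reads 0 as the full 64K).
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t encode_q_len(uint32_t len)
{
	return (len == 65536) ? 0 : (uint16_t)len;
}

int main(void)
{
	printf("cq_len 1024  -> 0x%04x\n", encode_q_len(1024));
	printf("cq_len 65536 -> 0x%04x\n", encode_q_len(65536));
	return 0;
}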
-
-static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct wqicb *wqicb = (struct wqicb *)tx_ring;
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
- void *shadow_reg = qdev->tx_ring_shadow_reg_area +
- (tx_ring->wq_id * sizeof(u64));
- u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
- (tx_ring->wq_id * sizeof(u64));
- int err = 0;
-
- /*
- * Assign doorbell registers for this tx_ring.
- */
- /* TX PCI doorbell mem area for tx producer index */
- tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
- tx_ring->prod_idx = 0;
- /* TX PCI doorbell mem area + 0x04 */
- tx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /*
- * Assign shadow registers for this tx_ring.
- */
- tx_ring->cnsmr_idx_sh_reg = shadow_reg;
- tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
-
- wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
- wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
- Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
- wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
- wqicb->rid = 0;
- wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
-
- wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
-
- ql_init_tx_ring(qdev, tx_ring);
-
- err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16) tx_ring->wq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
- return err;
- }
- return err;
-}
-
-static void ql_disable_msix(struct ql_adapter *qdev)
-{
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- pci_disable_msix(qdev->pdev);
- clear_bit(QL_MSIX_ENABLED, &qdev->flags);
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
- pci_disable_msi(qdev->pdev);
- clear_bit(QL_MSI_ENABLED, &qdev->flags);
- }
-}
-
-/* We start by trying to get the number of vectors
- * stored in qdev->intr_count. If we don't get that
- * many then we reduce the count and try again.
- */
-static void ql_enable_msix(struct ql_adapter *qdev)
-{
- int i, err;
-
- /* Get the MSIX vectors. */
- if (qlge_irq_type == MSIX_IRQ) {
- /* Try to alloc space for the msix struct,
- * if it fails then go to MSI/legacy.
- */
- qdev->msi_x_entry = kcalloc(qdev->intr_count,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!qdev->msi_x_entry) {
- qlge_irq_type = MSI_IRQ;
- goto msi;
- }
-
- for (i = 0; i < qdev->intr_count; i++)
- qdev->msi_x_entry[i].entry = i;
-
- err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
- 1, qdev->intr_count);
- if (err < 0) {
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- netif_warn(qdev, ifup, qdev->ndev,
- "MSI-X Enable failed, trying MSI.\n");
- qlge_irq_type = MSI_IRQ;
- } else {
- qdev->intr_count = err;
- set_bit(QL_MSIX_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
- return;
- }
- }
-msi:
- qdev->intr_count = 1;
- if (qlge_irq_type == MSI_IRQ) {
- if (!pci_enable_msi(qdev->pdev)) {
- set_bit(QL_MSI_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "Running with MSI interrupts.\n");
- return;
- }
- }
- qlge_irq_type = LEG_IRQ;
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Running with legacy interrupts.\n");
-}
-
-/* Each vector services 1 RSS ring and 1 or more
- * TX completion rings. This function loops through
- * the TX completion rings and assigns the vector that
- * will service it. An example would be if there are
- * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
- * This would mean that vector 0 would service RSS ring 0
- * and TX completion rings 0,1,2 and 3. Vector 1 would
- * service RSS ring 1 and TX completion rings 4,5,6 and 7.
- */
-static void ql_set_tx_vect(struct ql_adapter *qdev)
-{
- int i, j, vect;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Assign irq vectors to TX rx_rings.*/
- for (vect = 0, j = 0, i = qdev->rss_ring_count;
- i < qdev->rx_ring_count; i++) {
- if (j == tx_rings_per_vector) {
- vect++;
- j = 0;
- }
- qdev->rx_ring[i].irq = vect;
- j++;
- }
- } else {
-		/* For a single vector all rings have an irq
- * of zero.
- */
- for (i = 0; i < qdev->rx_ring_count; i++)
- qdev->rx_ring[i].irq = 0;
- }
-}
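/*
 * Illustrative sketch, not part of the original patch: the mapping the
 * comment above ql_set_tx_vect() describes. Assuming 2 MSI-X vectors and
 * 8 TX completion rings, rings 0-3 land on vector 0 and rings 4-7 on
 * vector 1.
 */
#include <stdio.h>

int main(void)
{
	const int intr_count = 2;	/* assumed number of vectors */
	const int tx_ring_count = 8;	/* assumed number of TX completion rings */
	const int tx_rings_per_vector = tx_ring_count / intr_count;
	int i;

	for (i = 0; i < tx_ring_count; i++)
		printf("TX completion ring %d -> vector %d\n",
		       i, i / tx_rings_per_vector);
	return 0;
}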
-
-/* Set the interrupt mask for this vector. Each vector
- * will service 1 RSS ring and 1 or more TX completion
- * rings. This function sets up a bit mask per vector
- * that indicates which rings it services.
- */
-static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
-{
- int j, vect = ctx->intr;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Add the RSS ring serviced by this vector
- * to the mask.
- */
- ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
- /* Add the TX ring(s) serviced by this vector
- * to the mask. */
- for (j = 0; j < tx_rings_per_vector; j++) {
- ctx->irq_mask |=
- (1 << qdev->rx_ring[qdev->rss_ring_count +
- (vect * tx_rings_per_vector) + j].cq_id);
- }
- } else {
-		/* For a single vector we just shift each queue's
- * ID into the mask.
- */
- for (j = 0; j < qdev->rx_ring_count; j++)
- ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
- }
-}
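/*
 * Illustrative sketch, not part of the original patch: building the
 * per-vector irq_mask described above. Assuming 2 vectors, 2 RSS rings
 * on cq_ids 0-1 and 8 TX completion rings on cq_ids 2-9, vector 0 covers
 * cq_ids {0, 2, 3, 4, 5} and vector 1 covers cq_ids {1, 6, 7, 8, 9}.
 */
#include <stdio.h>

int main(void)
{
	const int intr_count = 2, rss_ring_count = 2, tx_ring_count = 8;
	const int tx_rings_per_vector = tx_ring_count / intr_count;
	int vect, j;

	for (vect = 0; vect < intr_count; vect++) {
		unsigned int mask = 1u << vect;	/* RSS ring: cq_id == vect here */

		for (j = 0; j < tx_rings_per_vector; j++)
			mask |= 1u << (rss_ring_count +
				       vect * tx_rings_per_vector + j);
		printf("vector %d irq_mask = 0x%03x\n", vect, mask);
	}
	return 0;
}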
-
-/*
- * Here we build the intr_context structures based on
- * our rx_ring count and intr vector count.
- * The intr_context structure is used to hook each vector
- * to possibly different handlers.
- */
-static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
-{
- int i = 0;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
-		/* Each rx_ring has its
- * own intr_context since we have separate
- * vectors for each queue.
- */
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- qdev->rx_ring[i].irq = i;
- intr_context->intr = i;
- intr_context->qdev = qdev;
- /* Set up this vector's bit-mask that indicates
- * which queues it services.
- */
- ql_set_irq_mask(qdev, intr_context);
- /*
-			 * We set up each vector's enable/disable/read bits so
- * there's no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
- | i;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
- INTR_EN_IHD | i;
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
- i;
- if (i == 0) {
- /* The first vector/queue handles
- * broadcast/multicast, fatal errors,
-				 * and firmware events. This is in addition
- * to normal inbound NAPI processing.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- } else {
- /*
- * Inbound queues handle unicast frames only.
- */
- intr_context->handler = qlge_msix_rx_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- }
- }
- } else {
- /*
- * All rx_rings use the same intr_context since
- * there is only one vector.
- */
- intr_context->intr = 0;
- intr_context->qdev = qdev;
- /*
-		 * We set up each vector's enable/disable/read bits so
- * there's no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE;
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
- /*
- * Single interrupt means one handler for all rings.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
- /* Set up this vector's bit-mask that indicates
- * which queues it services. In this case there is
- * a single vector so it will service all RSS and
- * TX completion rings.
- */
- ql_set_irq_mask(qdev, intr_context);
- }
- /* Tell the TX completion rings which MSIx vector
- * they will be using.
- */
- ql_set_tx_vect(qdev);
-}
-
-static void ql_free_irq(struct ql_adapter *qdev)
-{
- int i;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- if (intr_context->hooked) {
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- free_irq(qdev->msi_x_entry[i].vector,
- &qdev->rx_ring[i]);
- } else {
- free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- }
- }
- }
- ql_disable_msix(qdev);
-}
-
-static int ql_request_irq(struct ql_adapter *qdev)
-{
- int i;
- int status = 0;
- struct pci_dev *pdev = qdev->pdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- ql_resolve_queues_to_irqs(qdev);
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- atomic_set(&intr_context->irq_cnt, 0);
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- status = request_irq(qdev->msi_x_entry[i].vector,
- intr_context->handler,
- 0,
- intr_context->name,
- &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed request for MSIX interrupt %d.\n",
- i);
- goto err_irq;
- }
- } else {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "trying msi or legacy interrupts.\n");
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: irq = %d.\n", __func__, pdev->irq);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
- status =
- request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED,
- &qdev->
- flags) ? 0 : IRQF_SHARED,
- intr_context->name, &qdev->rx_ring[0]);
- if (status)
- goto err_irq;
-
- netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[0].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
- }
- intr_context->hooked = 1;
- }
- return status;
-err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
- ql_free_irq(qdev);
- return status;
-}
-
-static int ql_start_rss(struct ql_adapter *qdev)
-{
- static const u8 init_hash_seed[] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
- 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
- 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
- };
- struct ricb *ricb = &qdev->ricb;
- int status = 0;
- int i;
- u8 *hash_id = (u8 *) ricb->hash_cq_id;
-
- memset((void *)ricb, 0, sizeof(*ricb));
-
- ricb->base_cq = RSS_L4K;
- ricb->flags =
- (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
- ricb->mask = cpu_to_le16((u16)(0x3ff));
-
- /*
- * Fill out the Indirection Table.
- */
- for (i = 0; i < 1024; i++)
- hash_id[i] = (i & (qdev->rss_ring_count - 1));
-
- memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
- memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
-
- status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
- return status;
- }
- return status;
-}
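/*
 * Illustrative sketch, not part of the original patch: the indirection
 * table fill above spreads the 1024 hash buckets across the RSS rings
 * with a power-of-two mask. Assuming rss_ring_count == 4:
 */
#include <stdio.h>

int main(void)
{
	const unsigned int rss_ring_count = 4;	/* assumed, a power of two */
	unsigned char hash_id[1024];
	unsigned int i;

	for (i = 0; i < 1024; i++)
		hash_id[i] = i & (rss_ring_count - 1);

	/* the first eight buckets cycle 0, 1, 2, 3, 0, 1, 2, 3 */
	for (i = 0; i < 8; i++)
		printf("bucket %u -> rss ring %u\n", i, hash_id[i]);
	return 0;
}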
-
-static int ql_clear_routing_entries(struct ql_adapter *qdev)
-{
- int i, status = 0;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
- /* Clear all the entries in the routing table. */
- for (i = 0; i < 16; i++) {
- status = ql_set_routing_reg(qdev, i, 0, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
- break;
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
-{
- int status = 0;
-
- /* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
- if (status)
- return status;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for IP CSUM error packets.\n");
- goto exit;
- }
- status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for TCP/UDP CSUM error packets.\n");
- goto exit;
- }
- status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for broadcast packets.\n");
- goto exit;
- }
- /* If we have more than one inbound queue, then turn on RSS in the
- * routing block.
- */
- if (qdev->rss_ring_count > 1) {
- status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for MATCH RSS packets.\n");
- goto exit;
- }
- }
-
- status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
- RT_IDX_CAM_HIT, 1);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
-exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-int ql_cam_route_initialize(struct ql_adapter *qdev)
-{
- int status, set;
-
-	/* Check if the link is up and use that to
-	 * determine whether we are setting or clearing
- * the MAC address in the CAM.
- */
- set = ql_read32(qdev, STS);
- set &= qdev->port_link_up;
- status = ql_set_mac_addr(qdev, set);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
- return status;
- }
-
- status = ql_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
-
- return status;
-}
-
-static int ql_adapter_initialize(struct ql_adapter *qdev)
-{
- u32 value, mask;
- int i;
- int status = 0;
-
- /*
- * Set up the System register to halt on errors.
- */
- value = SYS_EFE | SYS_FAE;
- mask = value << 16;
- ql_write32(qdev, SYS, mask | value);
-
- /* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ;
- mask = NIC_RCV_CFG_DFQ_MASK;
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- value |= NIC_RCV_CFG_RV;
- mask |= (NIC_RCV_CFG_RV << 16);
- }
- ql_write32(qdev, NIC_RCV_CFG, (mask | value));
-
- /* Set the MPI interrupt to enabled. */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-
- /* Enable the function, set pagesize, enable error checking. */
- value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K;
- value |= SPLT_SETTING;
-
- /* Set/clear header splitting. */
- mask = FSC_VM_PAGESIZE_MASK |
- FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
- ql_write32(qdev, FSC, mask | value);
-
- ql_write32(qdev, SPLT_HDR, SPLT_LEN);
-
- /* Set RX packet routing to use port/pci function on which the
-	 * packet arrived, in addition to the usual frame routing.
- * This is helpful on bonding where both interfaces can have
- * the same MAC address.
- */
- ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
- /* Reroute all packets to our Interface.
- * They may have been routed to MPI firmware
- * due to WOL.
- */
- value = ql_read32(qdev, MGMT_RCV_CFG);
- value &= ~MGMT_RCV_CFG_RM;
- mask = 0xffff0000;
-
- /* Sticky reg needs clearing due to WOL. */
- ql_write32(qdev, MGMT_RCV_CFG, mask);
- ql_write32(qdev, MGMT_RCV_CFG, mask | value);
-
-	/* Default WOL is enabled on Mezz cards */
- if (qdev->pdev->subsystem_device == 0x0068 ||
- qdev->pdev->subsystem_device == 0x0180)
- qdev->wol = WAKE_MAGIC;
-
- /* Start up the rx queues. */
- for (i = 0; i < qdev->rx_ring_count; i++) {
- status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start rx ring[%d].\n", i);
- return status;
- }
- }
-
- /* If there is more than one inbound completion queue
- * then download a RICB to configure RSS.
- */
- if (qdev->rss_ring_count > 1) {
- status = ql_start_rss(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
- return status;
- }
- }
-
- /* Start up the tx queues. */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start tx ring[%d].\n", i);
- return status;
- }
- }
-
- /* Initialize the port and set the max framesize. */
- status = qdev->nic_ops->port_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
-
- /* Set up the MAC address and frame routing filter. */
- status = ql_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return status;
- }
-
- /* Start NAPI for the RSS queues. */
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_enable(&qdev->rx_ring[i].napi);
-
- return status;
-}
-
-/* Issue soft reset to chip. */
-static int ql_adapter_reset(struct ql_adapter *qdev)
-{
- u32 value;
- int status = 0;
- unsigned long end_jiffies;
-
- /* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
- return status;
- }
-
-	/* If the recovery bit is set, skip the mailbox command and
-	 * clear the bit; otherwise we are in the normal reset process.
- */
- if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
- /* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
-
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
- } else
- clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
-
- ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
-
- end_jiffies = jiffies + usecs_to_jiffies(30);
- do {
- value = ql_read32(qdev, RST_FO);
- if ((value & RST_FO_FR) == 0)
- break;
- cpu_relax();
- } while (time_before(jiffies, end_jiffies));
-
- if (value & RST_FO_FR) {
- netif_err(qdev, ifdown, qdev->ndev,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
- status = -ETIMEDOUT;
- }
-
- /* Resume management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
- return status;
-}
-
-static void ql_display_dev_info(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- netif_info(qdev, probe, qdev->ndev,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- netif_info(qdev, probe, qdev->ndev,
- "MAC address %pM\n", ndev->dev_addr);
-}
-
-static int ql_wol(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 wol = MB_WOL_DISABLE;
-
- /* The CAM is still intact after a reset, but if we
- * are doing WOL, then we may need to program the
- * routing regs. We would also need to issue the mailbox
- * commands to instruct the MPI what to do per the ethtool
- * settings.
- */
-
- if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
- WAKE_MCAST | WAKE_BCAST)) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
- qdev->wol);
- return -EINVAL;
- }
-
- if (qdev->wol & WAKE_MAGIC) {
- status = ql_mb_wol_set_magic(qdev, 1);
- if (status) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
- return status;
- } else
- netif_info(qdev, drv, qdev->ndev,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
-
- wol |= MB_WOL_MAGIC_PKT;
- }
-
- if (qdev->wol) {
- wol |= MB_WOL_MODE_ON;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev,
- "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Successfully set" : "Failed",
- wol, qdev->ndev->name);
- }
-
- return status;
-}
-
-static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
-{
-
- /* Don't kill the reset worker thread if we
- * are in the process of recovery.
- */
- if (test_bit(QL_ADAPTER_UP, &qdev->flags))
- cancel_delayed_work_sync(&qdev->asic_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_core_to_log);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
-}
-
-static int ql_adapter_down(struct ql_adapter *qdev)
-{
- int i, status = 0;
-
- ql_link_off(qdev);
-
- ql_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_disable(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
-
- ql_disable_interrupts(qdev);
-
- ql_tx_ring_clean(qdev);
-
-	/* Call netif_napi_del() from a common point.
- */
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- status = ql_adapter_reset(qdev);
- if (status)
- netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
- qdev->func);
- ql_free_rx_buffers(qdev);
-
- return status;
-}
-
-static int ql_adapter_up(struct ql_adapter *qdev)
-{
- int err = 0;
-
- err = ql_adapter_initialize(qdev);
- if (err) {
- netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
- goto err_init;
- }
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_alloc_rx_buffers(qdev);
- /* If the port is initialized and the
-	 * link is up then turn on the carrier.
- */
- if ((ql_read32(qdev, STS) & qdev->port_init) &&
- (ql_read32(qdev, STS) & qdev->port_link_up))
- ql_link_on(qdev);
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- /* Restore vlan setting. */
- qlge_restore_vlan(qdev);
-
- ql_enable_interrupts(qdev);
- ql_enable_all_completion_interrupts(qdev);
- netif_tx_start_all_queues(qdev->ndev);
-
- return 0;
-err_init:
- ql_adapter_reset(qdev);
- return err;
-}
-
-static void ql_release_adapter_resources(struct ql_adapter *qdev)
-{
- ql_free_mem_resources(qdev);
- ql_free_irq(qdev);
-}
-
-static int ql_get_adapter_resources(struct ql_adapter *qdev)
-{
- int status = 0;
-
- if (ql_alloc_mem_resources(qdev)) {
- netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
- return -ENOMEM;
- }
- status = ql_request_irq(qdev);
- return status;
-}
-
-static int qlge_close(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- /* If we hit the pci_channel_io_perm_failure
- * condition, then we already
- * brought the adapter down.
- */
- if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
- clear_bit(QL_EEH_FATAL, &qdev->flags);
- return 0;
- }
-
- /*
- * Wait for device to recover from a reset.
- * (Rarely happens, but possible.)
- */
- while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
- msleep(1);
- ql_adapter_down(qdev);
- ql_release_adapter_resources(qdev);
- return 0;
-}
-
-static int ql_configure_rings(struct ql_adapter *qdev)
-{
- int i;
- struct rx_ring *rx_ring;
- struct tx_ring *tx_ring;
- int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
- unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
-
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- /* In a perfect world we have one RSS ring for each CPU
- * and each has its own vector. To do that we ask for
- * cpu_cnt vectors. ql_enable_msix() will adjust the
- * vector count to what we actually get. We then
- * allocate an RSS ring for each.
- * Essentially, we are doing min(cpu_count, msix_vector_count).
- */
- qdev->intr_count = cpu_cnt;
- ql_enable_msix(qdev);
- /* Adjust the RSS ring count to the actual vector count. */
- qdev->rss_ring_count = qdev->intr_count;
- qdev->tx_ring_count = cpu_cnt;
- qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
-
- for (i = 0; i < qdev->tx_ring_count; i++) {
- tx_ring = &qdev->tx_ring[i];
- memset((void *)tx_ring, 0, sizeof(*tx_ring));
- tx_ring->qdev = qdev;
- tx_ring->wq_id = i;
- tx_ring->wq_len = qdev->tx_ring_size;
- tx_ring->wq_size =
- tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
-
- /*
- * The completion queue IDs for the tx rings start
- * immediately after the rss rings.
- */
- tx_ring->cq_id = qdev->rss_ring_count + i;
- }
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- memset((void *)rx_ring, 0, sizeof(*rx_ring));
- rx_ring->qdev = qdev;
- rx_ring->cq_id = i;
- rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
- if (i < qdev->rss_ring_count) {
- /*
- * Inbound (RSS) queues.
- */
- rx_ring->cq_len = qdev->rx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = NUM_LARGE_BUFFERS;
- rx_ring->lbq_size =
- rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- rx_ring->sbq_len = NUM_SMALL_BUFFERS;
- rx_ring->sbq_size =
- rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
- rx_ring->type = RX_Q;
- } else {
- /*
- * Outbound queue handles outbound completions only.
- */
- /* outbound cq is same size as tx_ring it services. */
- rx_ring->cq_len = qdev->tx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = 0;
- rx_ring->lbq_size = 0;
- rx_ring->lbq_buf_size = 0;
- rx_ring->sbq_len = 0;
- rx_ring->sbq_size = 0;
- rx_ring->sbq_buf_size = 0;
- rx_ring->type = TX_Q;
- }
- }
- return 0;
-}
-
-static int qlge_open(struct net_device *ndev)
-{
- int err = 0;
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- err = ql_adapter_reset(qdev);
- if (err)
- return err;
-
- err = ql_configure_rings(qdev);
- if (err)
- return err;
-
- err = ql_get_adapter_resources(qdev);
- if (err)
- goto error_up;
-
- err = ql_adapter_up(qdev);
- if (err)
- goto error_up;
-
- return err;
-
-error_up:
- ql_release_adapter_resources(qdev);
- return err;
-}
-
-static int ql_change_rx_buffers(struct ql_adapter *qdev)
-{
- struct rx_ring *rx_ring;
- int i, status;
- u32 lbq_buf_len;
-
- /* Wait for an outstanding reset to complete. */
- if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 4;
-
- while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Waiting for adapter UP...\n");
- ssleep(1);
- }
-
- if (!i) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for adapter UP\n");
- return -ETIMEDOUT;
- }
- }
-
- status = ql_adapter_down(qdev);
- if (status)
- goto error;
-
- /* Get the new rx buffer size. */
- lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- /* Set the new size. */
- rx_ring->lbq_buf_size = lbq_buf_len;
- }
-
- status = ql_adapter_up(qdev);
- if (status)
- goto error;
-
- return status;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device.\n");
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- return status;
-}
-
-static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
-
- if (ndev->mtu == 1500 && new_mtu == 9000) {
- netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
- } else if (ndev->mtu == 9000 && new_mtu == 1500) {
- netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
- } else
- return -EINVAL;
-
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 3*HZ);
-
- ndev->mtu = new_mtu;
-
- if (!netif_running(qdev->ndev)) {
- return 0;
- }
-
- status = ql_change_rx_buffers(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Changing MTU failed.\n");
- }
-
- return status;
-}
-
-static struct net_device_stats *qlge_get_stats(struct net_device
- *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct rx_ring *rx_ring = &qdev->rx_ring[0];
- struct tx_ring *tx_ring = &qdev->tx_ring[0];
- unsigned long pkts, mcast, dropped, errors, bytes;
- int i;
-
- /* Get RX stats. */
- pkts = mcast = dropped = errors = bytes = 0;
- for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- pkts += rx_ring->rx_packets;
- bytes += rx_ring->rx_bytes;
- dropped += rx_ring->rx_dropped;
- errors += rx_ring->rx_errors;
- mcast += rx_ring->rx_multicast;
- }
- ndev->stats.rx_packets = pkts;
- ndev->stats.rx_bytes = bytes;
- ndev->stats.rx_dropped = dropped;
- ndev->stats.rx_errors = errors;
- ndev->stats.multicast = mcast;
-
- /* Get TX stats. */
- pkts = errors = bytes = 0;
- for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
- pkts += tx_ring->tx_packets;
- bytes += tx_ring->tx_bytes;
- errors += tx_ring->tx_errors;
- }
- ndev->stats.tx_packets = pkts;
- ndev->stats.tx_bytes = bytes;
- ndev->stats.tx_errors = errors;
- return &ndev->stats;
-}
-
-static void qlge_set_multicast_list(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct netdev_hw_addr *ha;
- int i, status;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return;
- /*
- * Set or clear promiscuous mode if a
- * transition is taking place.
- */
- if (ndev->flags & IFF_PROMISC) {
- if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set promiscuous mode.\n");
- } else {
- set_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear promiscuous mode.\n");
- } else {
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- }
-
- /*
- * Set or clear all multicast mode if a
- * transition is taking place.
- */
- if ((ndev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
- if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set all-multi mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear all-multi mode.\n");
- } else {
- clear_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- }
-
- if (!netdev_mc_empty(ndev)) {
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- goto exit;
- i = 0;
- netdev_for_each_mc_addr(ha, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
- MAC_ADDR_TYPE_MULTI_MAC, i)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to loadmulticast address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- goto exit;
- }
- i++;
- }
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (ql_set_routing_reg
- (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set multicast match mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
-exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-static int qlge_set_mac_address(struct net_device *ndev, void *p)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct sockaddr *addr = p;
- int status;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
- /* Update local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- if (status)
- netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static void qlge_tx_timeout(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- ql_queue_asic_error(qdev);
-}
-
-static void ql_asic_reset_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, asic_reset_work.work);
- int status;
- rtnl_lock();
- status = ql_adapter_down(qdev);
- if (status)
- goto error;
-
- status = ql_adapter_up(qdev);
- if (status)
- goto error;
-
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- rtnl_unlock();
- return;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device\n");
-
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- rtnl_unlock();
-}
-
-static const struct nic_operations qla8012_nic_ops = {
- .get_flash = ql_get_8012_flash_params,
- .port_initialize = ql_8012_port_initialize,
-};
-
-static const struct nic_operations qla8000_nic_ops = {
- .get_flash = ql_get_8000_flash_params,
- .port_initialize = ql_8000_port_initialize,
-};
-
-/* Find the pcie function number for the other NIC
- * on this chip. Since both NIC functions share a
- * common firmware we have the lowest enabled function
- * do any common work. Examples would be resetting
- * after a fatal firmware error, or doing a firmware
- * coredump.
- */
-static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 temp;
- u32 nic_func1, nic_func2;
-
- status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
- if (status)
- return status;
-
- nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
- nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
-
- if (qdev->func == nic_func1)
- qdev->alt_func = nic_func2;
- else if (qdev->func == nic_func2)
- qdev->alt_func = nic_func1;
- else
- status = -EIO;
-
- return status;
-}
-
-static int ql_get_board_info(struct ql_adapter *qdev)
-{
- int status;
- qdev->func =
- (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
- if (qdev->func > 3)
- return -EIO;
-
- status = ql_get_alt_pcie_func(qdev);
- if (status)
- return status;
-
- qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
- if (qdev->port) {
- qdev->xg_sem_mask = SEM_XGMAC1_MASK;
- qdev->port_link_up = STS_PL1;
- qdev->port_init = STS_PI1;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
- } else {
- qdev->xg_sem_mask = SEM_XGMAC0_MASK;
- qdev->port_link_up = STS_PL0;
- qdev->port_init = STS_PI0;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
- }
- qdev->chip_rev_id = ql_read32(qdev, REV_ID);
- qdev->device_id = qdev->pdev->device;
- if (qdev->device_id == QLGE_DEVICE_ID_8012)
- qdev->nic_ops = &qla8012_nic_ops;
- else if (qdev->device_id == QLGE_DEVICE_ID_8000)
- qdev->nic_ops = &qla8000_nic_ops;
- return status;
-}
-
-static void ql_release_all(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (qdev->workqueue) {
- destroy_workqueue(qdev->workqueue);
- qdev->workqueue = NULL;
- }
-
- if (qdev->reg_base)
- iounmap(qdev->reg_base);
- if (qdev->doorbell_area)
- iounmap(qdev->doorbell_area);
- vfree(qdev->mpi_coredump);
- pci_release_regions(pdev);
-}
-
-static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
- int cards_found)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err = 0;
-
- memset((void *)qdev, 0, sizeof(*qdev));
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "PCI device enable failed.\n");
- return err;
- }
-
- qdev->ndev = ndev;
- qdev->pdev = pdev;
- pci_set_drvdata(pdev, ndev);
-
- /* Set PCIe read request size */
- err = pcie_set_readrq(pdev, 4096);
- if (err) {
- dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out1;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "PCI region request failed.\n");
- return err;
- }
-
- pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- set_bit(QL_DMA64, &qdev->flags);
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out2;
- }
-
- /* Set PCIe reset type for EEH to fundamental. */
- pdev->needs_freset = 1;
- pci_save_state(pdev);
- qdev->reg_base =
- ioremap_nocache(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
- if (!qdev->reg_base) {
- dev_err(&pdev->dev, "Register mapping failed.\n");
- err = -ENOMEM;
- goto err_out2;
- }
-
- qdev->doorbell_area_size = pci_resource_len(pdev, 3);
- qdev->doorbell_area =
- ioremap_nocache(pci_resource_start(pdev, 3),
- pci_resource_len(pdev, 3));
- if (!qdev->doorbell_area) {
- dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
- err = -ENOMEM;
- goto err_out2;
- }
-
- err = ql_get_board_info(qdev);
- if (err) {
- dev_err(&pdev->dev, "Register access failed.\n");
- err = -EIO;
- goto err_out2;
- }
- qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->hw_lock);
- spin_lock_init(&qdev->stats_lock);
-
- if (qlge_mpi_coredump) {
- qdev->mpi_coredump =
- vmalloc(sizeof(struct ql_mpi_coredump));
- if (qdev->mpi_coredump == NULL) {
- err = -ENOMEM;
- goto err_out2;
- }
- if (qlge_force_coredump)
- set_bit(QL_FRC_COREDUMP, &qdev->flags);
- }
- /* make sure the EEPROM is good */
- err = qdev->nic_ops->get_flash(qdev);
- if (err) {
- dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out2;
- }
-
- /* Keep local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- /* Set up the default ring sizes. */
- qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
- qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
-
- /* Set up the coalescing parameters. */
- qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
- qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
-
- /*
- * Set up the operating parameters.
- */
- qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
- ndev->name);
- if (!qdev->workqueue) {
- err = -ENOMEM;
- goto err_out2;
- }
-
- INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
- INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
- INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
- INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
- init_completion(&qdev->ide_completion);
- mutex_init(&qdev->mpi_mutex);
-
- if (!cards_found) {
- dev_info(&pdev->dev, "%s\n", DRV_STRING);
- dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
- DRV_NAME, DRV_VERSION);
- }
- return 0;
-err_out2:
- ql_release_all(pdev);
-err_out1:
- pci_disable_device(pdev);
- return err;
-}
-
-static const struct net_device_ops qlge_netdev_ops = {
- .ndo_open = qlge_open,
- .ndo_stop = qlge_close,
- .ndo_start_xmit = qlge_send,
- .ndo_change_mtu = qlge_change_mtu,
- .ndo_get_stats = qlge_get_stats,
- .ndo_set_rx_mode = qlge_set_multicast_list,
- .ndo_set_mac_address = qlge_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qlge_tx_timeout,
- .ndo_set_features = qlge_set_features,
- .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
-};
-
-static void ql_timer(struct timer_list *t)
-{
- struct ql_adapter *qdev = from_timer(qdev, t, timer);
- u32 var = 0;
-
- var = ql_read32(qdev, STS);
- if (pci_channel_offline(qdev->pdev)) {
- netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
- return;
- }
-
- mod_timer(&qdev->timer, jiffies + (5*HZ));
-}
-
-static int qlge_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_entry)
-{
- struct net_device *ndev = NULL;
- struct ql_adapter *qdev = NULL;
- static int cards_found = 0;
- int err = 0;
-
- ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, netif_get_num_default_rss_queues()));
- if (!ndev)
- return -ENOMEM;
-
- err = ql_init_device(pdev, ndev, cards_found);
- if (err < 0) {
- free_netdev(ndev);
- return err;
- }
-
- qdev = netdev_priv(ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features;
- ndev->vlan_features = ndev->hw_features;
- /* vlan gets same features (except vlan filter) */
- ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
-
- if (test_bit(QL_DMA64, &qdev->flags))
- ndev->features |= NETIF_F_HIGHDMA;
-
- /*
- * Set up net_device structure.
- */
- ndev->tx_queue_len = qdev->tx_ring_size;
- ndev->irq = pdev->irq;
-
- ndev->netdev_ops = &qlge_netdev_ops;
- ndev->ethtool_ops = &qlge_ethtool_ops;
- ndev->watchdog_timeo = 10 * HZ;
-
- /* MTU range: this driver only supports 1500 or 9000, so this only
- * filters out values above or below, and we'll rely on
- * qlge_change_mtu to make sure only 1500 or 9000 are allowed
- */
- ndev->min_mtu = ETH_DATA_LEN;
- ndev->max_mtu = 9000;
-
- err = register_netdev(ndev);
- if (err) {
- dev_err(&pdev->dev, "net device registration failed.\n");
- ql_release_all(pdev);
- pci_disable_device(pdev);
- free_netdev(ndev);
- return err;
- }
- /* Start up the timer to trigger EEH if
- * the bus goes dead
- */
- timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- ql_link_off(qdev);
- ql_display_dev_info(ndev);
- atomic_set(&qdev->lb_count, 0);
- cards_found++;
- return 0;
-}
-
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
-{
- return qlge_send(skb, ndev);
-}
-
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- return ql_clean_inbound_rx_ring(rx_ring, budget);
-}
-
-static void qlge_remove(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- del_timer_sync(&qdev->timer);
- ql_cancel_all_work_sync(qdev);
- unregister_netdev(ndev);
- ql_release_all(pdev);
- pci_disable_device(pdev);
- free_netdev(ndev);
-}
-
-/* Clean up resources without touching hardware. */
-static void ql_eeh_close(struct net_device *ndev)
-{
- int i;
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (netif_carrier_ok(ndev)) {
- netif_carrier_off(ndev);
- netif_stop_queue(ndev);
- }
-
- /* Cancel all outstanding work. */
- ql_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_tx_ring_clean(qdev);
- ql_free_rx_buffers(qdev);
- ql_release_adapter_resources(qdev);
-}
-
-/*
- * This callback is called by the PCI subsystem whenever
- * a PCI bus error is detected.
- */
-static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- switch (state) {
- case pci_channel_io_normal:
- return PCI_ERS_RESULT_CAN_RECOVER;
- case pci_channel_io_frozen:
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
- if (netif_running(ndev))
- ql_eeh_close(ndev);
- pci_disable_device(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- dev_err(&pdev->dev,
- "%s: pci_channel_io_perm_failure.\n", __func__);
- del_timer_sync(&qdev->timer);
- ql_eeh_close(ndev);
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/*
- * This callback is called after the PCI bus has been reset.
- * Basically, this tries to restart the card from scratch.
- * This is a shortened version of the device probe/discovery code;
- * it resembles the first half of the () routine.
- */
-static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- pdev->error_state = pci_channel_io_normal;
-
- pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
-
- if (ql_adapter_reset(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void qlge_io_resume(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err = 0;
-
- if (netif_running(ndev)) {
- err = qlge_open(ndev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev,
- "Device initialization failed after reset.\n");
- return;
- }
- } else {
- netif_err(qdev, ifup, qdev->ndev,
- "Device was not running prior to EEH.\n");
- }
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- netif_device_attach(ndev);
-}
-
-static const struct pci_error_handlers qlge_err_handler = {
- .error_detected = qlge_io_error_detected,
- .slot_reset = qlge_io_slot_reset,
- .resume = qlge_io_resume,
-};
-
-static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err;
-
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
-
- if (netif_running(ndev)) {
- err = ql_adapter_down(qdev);
- if (!err)
- return err;
- }
-
- ql_wol(qdev);
- err = pci_save_state(pdev);
- if (err)
- return err;
-
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int qlge_resume(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- if (netif_running(ndev)) {
- err = ql_adapter_up(qdev);
- if (err)
- return err;
- }
-
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- netif_device_attach(ndev);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static void qlge_shutdown(struct pci_dev *pdev)
-{
- qlge_suspend(pdev, PMSG_SUSPEND);
-}
-
-static struct pci_driver qlge_driver = {
- .name = DRV_NAME,
- .id_table = qlge_pci_tbl,
- .probe = qlge_probe,
- .remove = qlge_remove,
-#ifdef CONFIG_PM
- .suspend = qlge_suspend,
- .resume = qlge_resume,
-#endif
- .shutdown = qlge_shutdown,
- .err_handler = &qlge_err_handler
-};
-
-module_pci_driver(qlge_driver);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
deleted file mode 100644
index 957c72985a06..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ /dev/null
@@ -1,1285 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "qlge.h"
-
-int ql_unpause_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
-
- /* Un-pause the RISC */
- tmp = ql_read32(qdev, CSR);
- if (!(tmp & CSR_RP))
- return -EIO;
-
- ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
- return 0;
-}
-
-int ql_pause_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
- int count = UDELAY_COUNT;
-
- /* Pause the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
- do {
- tmp = ql_read32(qdev, CSR);
- if (tmp & CSR_RP)
- break;
- mdelay(UDELAY_DELAY);
- count--;
- } while (count);
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
- int count = UDELAY_COUNT;
-
- /* Reset the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_RST);
- do {
- tmp = ql_read32(qdev, CSR);
- if (tmp & CSR_RR) {
- ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
- break;
- }
- mdelay(UDELAY_DELAY);
- count--;
- } while (count);
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* get the data */
- *data = ql_read32(qdev, PROC_DATA);
-exit:
- return status;
-}
-
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* write the data to the data reg */
- ql_write32(qdev, PROC_DATA, data);
- /* trigger the write */
- ql_write32(qdev, PROC_ADDR, reg);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
-exit:
- return status;
-}
-
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
-{
- int status;
- status = ql_write_mpi_reg(qdev, 0x00001010, 1);
- return status;
-}
-
-/* Determine if we are in charge of the firmware. If
- * we are the lower of the 2 NIC pcie functions, or if
- * we are the higher function and the lower function
- * is not enabled.
- */
-int ql_own_firmware(struct ql_adapter *qdev)
-{
- u32 temp;
-
- /* If we are the lower of the 2 NIC functions
- * on the chip then we are responsible for
- * core dump and firmware reset after an error.
- */
- if (qdev->func < qdev->alt_func)
- return 1;
-
- /* If we are the higher of the 2 NIC functions
- * on the chip and the lower function is not
- * enabled, then we are responsible for
- * core dump and firmware reset after an error.
- */
- temp = ql_read32(qdev, STS);
- if (!(temp & (1 << (8 + qdev->alt_func))))
- return 1;
-
- return 0;
-
-}
-
-static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return -EBUSY;
- for (i = 0; i < mbcp->out_count; i++) {
- status =
- ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
- break;
- }
- }
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-}
-
-/* Wait for a single mailbox command to complete.
- * Returns zero on success.
- */
-static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
-{
- int count = 100;
- u32 value;
-
- do {
- value = ql_read32(qdev, STS);
- if (value & STS_PI)
- return 0;
- mdelay(UDELAY_DELAY); /* 100ms */
- } while (--count);
- return -ETIMEDOUT;
-}
-
-/* Execute a single mailbox command.
- * Caller must hold PROC_ADDR semaphore.
- */
-static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- /*
- * Make sure there's nothing pending.
- * This shouldn't happen.
- */
- if (ql_read32(qdev, CSR) & CSR_HRI)
- return -EIO;
-
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return status;
-
- /*
- * Fill the outbound mailboxes.
- */
- for (i = 0; i < mbcp->in_count; i++) {
- status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
- if (status)
- goto end;
- }
- /*
- * Wake up the MPI firmware.
- */
- ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
-end:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
- return status;
-}
-
-/* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode. We wake up a worker
- * to handle processing this since a mailbox command
- * will need to be sent to ACK the request.
- */
-static int ql_idc_req_aen(struct ql_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
-
- netif_err(qdev, drv, qdev->ndev, "Enter!\n");
- /* Get the status data and start up a thread to
- * handle the request.
- */
- mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
- } else {
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker.
- */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
- }
- return status;
-}
-
-/* Process an inter-device event completion.
- * If good, signal the caller's completion.
- */
-static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting RISC!\n");
- ql_queue_fw_error(qdev);
- } else
- /* Wake up the sleeping mpi_idc_work thread that is
- * waiting for this event.
- */
- complete(&qdev->ide_completion);
-
- return status;
-}
-
-static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- mbcp->out_count = 2;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "%s: Could not get mailbox status.\n", __func__);
- return;
- }
-
- qdev->link_status = mbcp->mbox_out[1];
- netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
-
- /* If we're coming back from an IDC event
- * then set up the CAM and frame routing.
- */
- if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
- status = ql_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return;
- } else
- clear_bit(QL_CAM_RT_SET, &qdev->flags);
- }
-
- /* Queue up a worker to check the frame
- * size information, and fix it if it's not
- * to our liking.
- */
- if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
- set_bit(QL_PORT_CFG, &qdev->flags);
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker dpc.
- */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
- }
-
- ql_link_on(qdev);
-}
-
-static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 3;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
-
- ql_link_off(qdev);
-}
-
-static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 5;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
-
- return status;
-}
-
-static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 1;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
-
- return status;
-}
-
-static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 6;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
- else {
- int i;
- netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
- for (i = 0; i < mbcp->out_count; i++)
- netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
-
- }
-
- return status;
-}
-
-static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 2;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
- } else {
- netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
- qdev->fw_rev_id = mbcp->mbox_out[1];
- status = ql_cam_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- }
-}
-
-/* Process an async event and clear it unless it's an
- * error condition.
- * This can get called iteratively from the mpi_work thread
- * when events arrive via an interrupt.
- * It also gets called when a mailbox command is polling for
- * its completion. */
-static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- int orig_count = mbcp->out_count;
-
- /* Just get mailbox zero for now. */
- mbcp->out_count = 1;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
- goto end;
- }
-
- switch (mbcp->mbox_out[0]) {
-
- /* This case is only active when we arrive here
- * as a result of issuing a mailbox command to
- * the firmware.
- */
- case MB_CMD_STS_INTRMDT:
- case MB_CMD_STS_GOOD:
- case MB_CMD_STS_INVLD_CMD:
- case MB_CMD_STS_XFC_ERR:
- case MB_CMD_STS_CSUM_ERR:
- case MB_CMD_STS_ERR:
- case MB_CMD_STS_PARAM_ERR:
- /* We can only get mailbox status if we're polling from an
- * unfinished command. Get the rest of the status data and
- * return back to the caller.
- * We only end up here when we're polling for a mailbox
- * command completion.
- */
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- return status;
-
- /* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode.
- */
- case AEN_IDC_REQ:
- status = ql_idc_req_aen(qdev);
- break;
-
- /* Process an inbound IDC event.
- * This will happen when we're trying to
- * change tx/rx max frame size, change pause
- * parameters or loopback mode.
- */
- case AEN_IDC_CMPLT:
- case AEN_IDC_EXT:
- status = ql_idc_cmplt_aen(qdev);
- break;
-
- case AEN_LINK_UP:
- ql_link_up(qdev, mbcp);
- break;
-
- case AEN_LINK_DOWN:
- ql_link_down(qdev, mbcp);
- break;
-
- case AEN_FW_INIT_DONE:
- /* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
- return status;
- }
- ql_init_fw_done(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_IN:
- ql_sfp_in(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_OUT:
- ql_sfp_out(qdev, mbcp);
- break;
-
- /* This event can arrive at boot time or after an
- * MPI reset if the firmware failed to initialize.
- */
- case AEN_FW_INIT_FAIL:
- /* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_ERR;
- return status;
- }
- netif_err(qdev, drv, qdev->ndev,
- "Firmware initialization failed.\n");
- status = -EIO;
- ql_queue_fw_error(qdev);
- break;
-
- case AEN_SYS_ERR:
- netif_err(qdev, drv, qdev->ndev, "System Error.\n");
- ql_queue_fw_error(qdev);
- status = -EIO;
- break;
-
- case AEN_AEN_LOST:
- ql_aen_lost(qdev, mbcp);
- break;
-
- case AEN_DCBX_CHG:
- /* Need to support AEN 8110 */
- break;
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
- /* Clear the MPI firmware status. */
- }
-end:
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
- /* Restore the original mailbox count to
- * what the caller asked for. This can get
- * changed when a mailbox command is waiting
- * for a response and an AEN arrives and
- * is handled.
- */
- mbcp->out_count = orig_count;
- return status;
-}
-
-/* Execute a single mailbox command.
- * mbcp points to a mbox_params struct; each element
- * of its mbox_in array contains the value for its
- * respective mailbox register.
- */
-static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- unsigned long count;
-
- mutex_lock(&qdev->mpi_mutex);
-
- /* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- /* Load the mailbox registers and wake up MPI RISC. */
- status = ql_exec_mb_cmd(qdev, mbcp);
- if (status)
- goto end;
-
-
- /* If we're generating a system error, then there's nothing
- * to wait for.
- */
- if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
- goto end;
-
- /* Wait for the command to complete. We loop
- * here because some AEN might arrive while
- * we're waiting for the mailbox command to
- * complete. If more than 5 seconds expire we can
- * assume something is wrong. */
- count = jiffies + HZ * MAILBOX_TIMEOUT;
- do {
- /* Wait for the interrupt to come in. */
- status = ql_wait_mbx_cmd_cmplt(qdev);
- if (status)
- continue;
-
- /* Process the event. If it's an AEN, it
- * will be handled in-line or a worker
- * will be spawned. If it's our completion
- * we will catch it below.
- */
- status = ql_mpi_handler(qdev, mbcp);
- if (status)
- goto end;
-
- /* It's either the completion for our mailbox
- * command or an AEN. If it's our
- * completion then get out.
- */
- if (((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_GOOD) ||
- ((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_INTRMDT))
- goto done;
- } while (time_before(jiffies, count));
-
- netif_err(qdev, drv, qdev->ndev,
- "Timed out waiting for mailbox complete.\n");
- status = -ETIMEDOUT;
- goto end;
-
-done:
-
- /* Now we can clear the interrupt condition
- * and look at our status.
- */
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
-
- if (((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_GOOD) &&
- ((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_INTRMDT)) {
- status = -EIO;
- }
-end:
- /* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
- return status;
-}
-
-/* Get MPI firmware version. This will be used for
- * driver banner and for ethtool info.
- * Returns zero on success.
- */
-int ql_mb_about_fw(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed about firmware command\n");
- status = -EIO;
- }
-
- /* Store the firmware version */
- qdev->fw_rev_id = mbcp->mbox_out[1];
-
- return status;
-}
-
-/* Get functional state for MPI firmware.
- * Returns zero on success.
- */
-int ql_mb_get_fw_state(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Firmware State.\n");
- status = -EIO;
- }
-
- /* If bit zero is set in mbx 1 then the firmware is
- * running, but not initialized. This should never
- * happen.
- */
- if (mbcp->mbox_out[1] & 1) {
- netif_err(qdev, drv, qdev->ndev,
- "Firmware waiting for initialization.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-/* Send an ACK mailbox command to the firmware to
- * let it continue with the change.
- */
-static int ql_mb_idc_ack(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 5;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
- mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
- mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
- mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
- mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* Set link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_set_port_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 3;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
- mbcp->mbox_in[1] = qdev->link_config;
- mbcp->mbox_in[2] = qdev->max_frame_size;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- netif_err(qdev, drv, qdev->ndev,
- "Port Config sent, wait for IDC.\n");
- } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Set Port Configuration.\n");
- status = -EIO;
- }
- return status;
-}
-
-static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
-{
- int status = 0;
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 9;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
- mbcp->mbox_in[1] = LSW(addr);
- mbcp->mbox_in[2] = MSW(req_dma);
- mbcp->mbox_in[3] = LSW(req_dma);
- mbcp->mbox_in[4] = MSW(size);
- mbcp->mbox_in[5] = LSW(size);
- mbcp->mbox_in[6] = MSW(MSD(req_dma));
- mbcp->mbox_in[7] = LSW(MSD(req_dma));
- mbcp->mbox_in[8] = MSW(addr);
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* Issue a mailbox command to dump RISC RAM. */
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
-{
- int status;
- char *my_buf;
- dma_addr_t buf_dma;
-
- my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
- &buf_dma);
- if (!my_buf)
- return -EIO;
-
- status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
- if (!status)
- memcpy(buf, my_buf, word_count * sizeof(u32));
-
- pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
- buf_dma);
- return status;
-}
-
-/* Get link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_get_port_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Port Configuration.\n");
- status = -EIO;
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Passed Get Port Configuration.\n");
- qdev->link_config = mbcp->mbox_out[1];
- qdev->max_frame_size = mbcp->mbox_out[2];
- }
- return status;
-}
-
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
- mbcp->mbox_in[1] = wol;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
- u8 *addr = qdev->ndev->dev_addr;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 8;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
- if (enable_wol) {
- mbcp->mbox_in[1] = (u32)addr[0];
- mbcp->mbox_in[2] = (u32)addr[1];
- mbcp->mbox_in[3] = (u32)addr[2];
- mbcp->mbox_in[4] = (u32)addr[3];
- mbcp->mbox_in[5] = (u32)addr[4];
- mbcp->mbox_in[6] = (u32)addr[5];
- mbcp->mbox_in[7] = 0;
- } else {
- mbcp->mbox_in[1] = 0;
- mbcp->mbox_in[2] = 1;
- mbcp->mbox_in[3] = 1;
- mbcp->mbox_in[4] = 1;
- mbcp->mbox_in[5] = 1;
- mbcp->mbox_in[6] = 1;
- mbcp->mbox_in[7] = 0;
- }
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* IDC - Inter Device Communication...
- * Some firmware commands require consent of adjacent FCOE
- * function. This function waits for the OK, or a
- * counter-request for a little more time.
- * The firmware will complete the request if the other
- * function doesn't respond.
- */
-static int ql_idc_wait(struct ql_adapter *qdev)
-{
- int status = -ETIMEDOUT;
- long wait_time = 1 * HZ;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- do {
- /* Wait here for the command to complete
- * via the IDC process.
- */
- wait_time =
- wait_for_completion_timeout(&qdev->ide_completion,
- wait_time);
- if (!wait_time) {
- netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
- break;
- }
- /* Now examine the response from the IDC process.
- * We might have a good completion or a request for
- * more wait time.
- */
- if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- netif_err(qdev, drv, qdev->ndev,
- "IDC Time Extension from function.\n");
- wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
- } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
- status = 0;
- break;
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
- status = -EIO;
- break;
- }
- } while (wait_time);
-
- return status;
-}
-
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
- mbcp->mbox_in[1] = led_config;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to set LED Configuration.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-int ql_mb_get_led_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get LED Configuration.\n");
- status = -EIO;
- } else
- qdev->led_config = mbcp->mbox_out[1];
-
- return status;
-}
-
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
- mbcp->mbox_in[1] = control;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- /* This indicates that the firmware is
- * already in the state we are trying to
- * change it to.
- */
- netif_err(qdev, drv, qdev->ndev,
- "Command parameters make no change.\n");
- }
- return status;
-}
-
-/* Returns a negative error code or the mailbox command status. */
-static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
- *control = 0;
-
- mbcp->in_count = 1;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
- *control = mbcp->mbox_in[1];
- return status;
- }
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get MPI traffic control.\n");
- status = -EIO;
- }
- return status;
-}
-
-int ql_wait_fifo_empty(struct ql_adapter *qdev)
-{
- int count = 5;
- u32 mgmnt_fifo_empty;
- u32 nic_fifo_empty;
-
- do {
- nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
- ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
- mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
- if (nic_fifo_empty && mgmnt_fifo_empty)
- return 0;
- msleep(100);
- } while (count-- > 0);
- return -ETIMEDOUT;
-}
-
-/* API called in work thread context to set new TX/RX
- * maximum frame size values to match MTU.
- */
-static int ql_set_port_cfg(struct ql_adapter *qdev)
-{
- int status;
- status = ql_mb_set_port_cfg(qdev);
- if (status)
- return status;
- status = ql_idc_wait(qdev);
- return status;
-}
-
-/* The following routines are worker threads that process
- * events that may sleep waiting for completion.
- */
-
-/* This thread gets the maximum TX and RX frame size values
- * from the firmware and, if necessary, changes them to match
- * the MTU setting.
- */
-void ql_mpi_port_cfg_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
- int status;
-
- status = ql_mb_get_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to get port config data.\n");
- goto err;
- }
-
- if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
- qdev->max_frame_size ==
- CFG_DEFAULT_MAX_FRAME_SIZE)
- goto end;
-
- qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
- qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
- status = ql_set_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to set port config data.\n");
- goto err;
- }
-end:
- clear_bit(QL_PORT_CFG, &qdev->flags);
- return;
-err:
- ql_queue_fw_error(qdev);
- goto end;
-}
-
-/* Process an inter-device request. This is issued by
- * the firmware in response to another function requesting
- * a change to the port. We set a flag to indicate a change
- * has been made and then send a mailbox command ACKing
- * the change request.
- */
-void ql_mpi_idc_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_idc_work.work);
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- u32 aen;
- int timeout;
-
- aen = mbcp->mbox_out[1] >> 16;
- timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
-
- switch (aen) {
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Unhandled IDC action.\n");
- break;
- case MB_CMD_PORT_RESET:
- case MB_CMD_STOP_FW:
- ql_link_off(qdev);
- /* Fall through */
- case MB_CMD_SET_PORT_CFG:
- /* Signal the resulting link up AEN
- * that the frame routing and mac addr
- * needs to be set.
- */
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Do ACK if required */
- if (timeout) {
- status = ql_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
-
- /* These sub-commands, issued by another (FCoE)
- * function, request an operation
- * on the shared resource (MPI environment).
- * We currently don't issue these so we just
- * ACK the request.
- */
- case MB_CMD_IOP_RESTART_MPI:
- case MB_CMD_IOP_PREP_LINK_DOWN:
- /* Drop the link, reload the routing
- * table when link comes up.
- */
- ql_link_off(qdev);
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Fall through. */
- case MB_CMD_IOP_DVR_START:
- case MB_CMD_IOP_FLASH_ACC:
- case MB_CMD_IOP_CORE_DUMP_MPI:
- case MB_CMD_IOP_PREP_UPDATE_MPI:
- case MB_CMD_IOP_COMP_UPDATE_MPI:
- case MB_CMD_IOP_NONE: /* an IDC without params */
- /* Do ACK if required */
- if (timeout) {
- status = ql_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
- }
-}
-
-void ql_mpi_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_work.work);
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int err = 0;
-
- mutex_lock(&qdev->mpi_mutex);
- /* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- while (ql_read32(qdev, STS) & STS_PI) {
- memset(mbcp, 0, sizeof(struct mbox_params));
- mbcp->out_count = 1;
- /* Don't continue if an async event
- * did not complete properly.
- */
- err = ql_mpi_handler(qdev, mbcp);
- if (err)
- break;
- }
-
- /* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
- ql_enable_completion_interrupt(qdev, 0);
-}
-
-void ql_mpi_reset_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_reset_work.work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- /* If we're not the dominant NIC function,
- * then there is nothing to do.
- */
- if (!ql_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
- netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
- qdev->core_is_dumped = 1;
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_core_to_log, 5 * HZ);
- }
- ql_soft_reset_mpi_risc(qdev);
-}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 707665b62eb7..bebe38d74d66 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
}
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
- tpbuf->length = frag->size;
- tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
- frag->page.p, frag->page_offset,
- tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->length = skb_frag_size(frag);
+ tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+ frag, 0, tpbuf->length,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
if (ret)
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 59c2349b59df..c84ab052ef26 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -544,7 +544,6 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- struct resource *res;
char maddr[ETH_ALEN];
int ret = 0;
@@ -556,22 +555,17 @@ static int emac_probe_resources(struct platform_device *pdev,
/* Core 0 interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "error: missing core0 irq resource (error=%i)\n", ret);
+ if (ret < 0)
return ret;
- }
adpt->irq.irq = ret;
/* base register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adpt->base = devm_ioremap_resource(&pdev->dev, res);
+ adpt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adpt->base))
return PTR_ERR(adpt->base);
/* CSR register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- adpt->csr = devm_ioremap_resource(&pdev->dev, res);
+ adpt->csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(adpt->csr))
return PTR_ERR(adpt->csr);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index bcb890b18a94..702aa217a27a 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -131,17 +131,10 @@ DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
void
qcaspi_init_device_debugfs(struct qcaspi *qca)
{
- struct dentry *device_root;
+ qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev),
+ NULL);
- device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL);
- qca->device_root = device_root;
-
- if (IS_ERR(device_root) || !device_root) {
- pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&qca->net_dev->dev));
- return;
- }
- debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
+ debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca,
&qcaspi_info_fops);
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index b28360bc2255..5ecf61df78bd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -837,8 +837,7 @@ qcaspi_netdev_uninit(struct net_device *dev)
kfree(qca->rx_buffer);
qca->buffer_size = 0;
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcaspi_netdev_ops = {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 590616846cd1..0981068504fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -285,8 +285,7 @@ static void qcauart_netdev_uninit(struct net_device *dev)
{
struct qcauart *qca = netdev_priv(dev);
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcauart_netdev_ops = {
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index b18e7a91d5cd..5e0b9d2f14f7 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -96,14 +96,19 @@ config 8139_OLD_RX_RESET
old RX-reset behavior. If unsure, say N.
config R8169
- tristate "Realtek 8169 gigabit ethernet support"
+ tristate "Realtek 8169/8168/8101/8125 ethernet support"
depends on PCI
select FW_LOADER
select CRC32
select PHYLIB
select REALTEK_PHY
---help---
- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+ Say Y here if you have a Realtek Ethernet adapter belonging to
+ the following families:
+ RTL8169 Gigabit Ethernet
+ RTL8168 Gigabit Ethernet
+ RTL8101 Fast Ethernet
+ RTL8125 2.5GBit Ethernet
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bae0074ab9aa..0ef01db1f8b8 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,13 +55,14 @@
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
+#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-static const int multicast_filter_limit = 32;
+#define MC_FILTER_LIMIT 32
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -135,6 +136,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_60,
+ RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_NONE
};
@@ -200,6 +203,8 @@ static const struct {
[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_60] = {"RTL8125" },
+ [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -220,6 +225,8 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(USR, 0x0116) },
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
+ { PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -271,7 +278,6 @@ enum rtl_registers {
Config3 = 0x54,
Config4 = 0x55,
Config5 = 0x56,
- MultiIntr = 0x5c,
PHYAR = 0x60,
PHYstatus = 0x6c,
RxMaxSize = 0xda,
@@ -385,6 +391,19 @@ enum rtl8168_registers {
#define EARLY_TALLY_EN (1 << 16)
};
+enum rtl8125_registers {
+ IntrMask_8125 = 0x38,
+ IntrStatus_8125 = 0x3c,
+ TxPoll_8125 = 0x90,
+ MAC0_BKP = 0x19e0,
+};
+
+#define RX_VLAN_INNER_8125 BIT(22)
+#define RX_VLAN_OUTER_8125 BIT(23)
+#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
+
+#define RX_FETCH_DFLT_8125 (8 << 27)
+
enum rtl_register_content {
/* InterruptStatusBits */
SYSErr = 0x8000,
@@ -539,11 +558,11 @@ enum rtl_tx_desc_bit_1 {
TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */
TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */
#define GTTCPHO_SHIFT 18
-#define GTTCPHO_MAX 0x7fU
+#define GTTCPHO_MAX 0x7f
/* Second doubleword. */
#define TCPHO_SHIFT 18
-#define TCPHO_MAX 0x3ffU
+#define TCPHO_MAX 0x3ff
#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */
TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */
@@ -569,6 +588,11 @@ enum rtl_rx_desc_bit {
#define RsvdMask 0x3fffc000
+#define RTL_GSO_MAX_SIZE_V1 32000
+#define RTL_GSO_MAX_SEGS_V1 24
+#define RTL_GSO_MAX_SIZE_V2 64000
+#define RTL_GSO_MAX_SEGS_V2 64
+
struct TxDesc {
__le32 opts1;
__le32 opts2;
@@ -638,10 +662,10 @@ struct rtl8169_private {
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
- void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
- u16 irq_mask;
+ u32 irq_mask;
struct clk *clk;
struct {
@@ -691,6 +715,7 @@ MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
+MODULE_FIRMWARE(FIRMWARE_8125A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -723,12 +748,33 @@ static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
PCI_EXP_DEVCTL_READRQ, force);
}
+static bool rtl_is_8125(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_60;
+}
+
static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39 &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_51;
+}
+
+static bool rtl_supports_eee(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_37 &&
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
+static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac[i] = RTL_R8(tp, reg + i);
+}
+
struct rtl_cond {
bool (*check)(struct rtl8169_private *);
const char *msg;
@@ -846,6 +892,14 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
return RTL_R32(tp, OCPDR);
}
+static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
+ u16 set)
+{
+ u16 data = r8168_mac_ocp_read(tp, reg);
+
+ r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
+}
+
#define OCP_STD_PHY_BASE 0xa400
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -995,7 +1049,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1012,7 +1066,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1294,14 +1348,28 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
-static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
+static u32 rtl_get_events(struct rtl8169_private *tp)
+{
+ if (rtl_is_8125(tp))
+ return RTL_R32(tp, IntrStatus_8125);
+ else
+ return RTL_R16(tp, IntrStatus);
+}
+
+static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
{
- RTL_W16(tp, IntrStatus, bits);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrStatus_8125, bits);
+ else
+ RTL_W16(tp, IntrStatus, bits);
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
- RTL_W16(tp, IntrMask, 0);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrMask_8125, 0);
+ else
+ RTL_W16(tp, IntrMask, 0);
tp->irq_enabled = 0;
}
@@ -1312,13 +1380,16 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
static void rtl_irq_enable(struct rtl8169_private *tp)
{
tp->irq_enabled = 1;
- RTL_W16(tp, IntrMask, tp->irq_mask);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrMask_8125, tp->irq_mask);
+ else
+ RTL_W16(tp, IntrMask, tp->irq_mask);
}
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
- rtl_ack_events(tp, 0xffff);
+ rtl_ack_events(tp, 0xffffffff);
/* PCI commit */
RTL_R8(tp, ChipCmd);
}
@@ -1377,7 +1448,6 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- unsigned int i, tmp;
static const struct {
u32 opt;
u16 reg;
@@ -1390,20 +1460,25 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{ WAKE_ANY, Config5, LanWake },
{ WAKE_MAGIC, Config3, MagicPacket }
};
+ unsigned int i, tmp = ARRAY_SIZE(cfg);
u8 options;
rtl_unlock_config_regs(tp);
if (rtl_is_8168evl_up(tp)) {
- tmp = ARRAY_SIZE(cfg) - 1;
+ tmp--;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
MagicPacket_v2);
else
rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
MagicPacket_v2);
- } else {
- tmp = ARRAY_SIZE(cfg);
+ } else if (rtl_is_8125(tp)) {
+ tmp--;
+ if (wolopts & WAKE_MAGIC)
+ r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
+ else
+ r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
}
for (i = 0; i < tmp; i++) {
@@ -1414,18 +1489,22 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
RTL_W8(tp, Config1, options);
break;
- default:
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
RTL_W8(tp, Config2, options);
break;
+ default:
+ break;
}
rtl_lock_config_regs(tp);
@@ -1505,6 +1584,13 @@ static int rtl8169_set_features(struct net_device *dev,
else
rx_config &= ~(AcceptErr | AcceptRunt);
+ if (rtl_is_8125(tp)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ rx_config |= RX_VLAN_8125;
+ else
+ rx_config &= ~RX_VLAN_8125;
+ }
+
RTL_W32(tp, RxConfig, rx_config);
if (features & NETIF_F_RXCSUM)
@@ -1512,10 +1598,12 @@ static int rtl8169_set_features(struct net_device *dev,
else
tp->cp_cmd &= ~RxChkSum;
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (!rtl_is_8125(tp)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+ }
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
@@ -1814,6 +1902,9 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
int i;
u16 w;
+ if (rtl_is_8125(tp))
+ return -EOPNOTSUPP;
+
memset(ec, 0, sizeof(*ec));
/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
@@ -1882,6 +1973,9 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
u16 w = 0, cp01;
int i;
+ if (rtl_is_8125(tp))
+ return -EOPNOTSUPP;
+
scale = rtl_coalesce_choose_scale(dev,
max(p[0].usecs, p[1].usecs) * 1000, &cp01);
if (IS_ERR(scale))
@@ -1929,144 +2023,40 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
-static int rtl_get_eee_supp(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5c, 0x12);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x11);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_adv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x10);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
-{
- struct phy_device *phydev = tp->phydev;
- int ret = 0;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write_paged(phydev, 0x0a5d, 0x10, val);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
int ret;
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
+
pm_runtime_get_noresume(d);
if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
- goto out;
+ } else {
+ ret = phy_ethtool_get_eee(tp->phydev, data);
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
-
- /* Get advertisement EEE */
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_enabled = !!data->advertised;
-
- /* Get LP advertisement EEE */
- ret = rtl_get_eee_lpadv(tp);
- if (ret < 0)
- goto out;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_active = !!(data->advertised & data->lp_advertised);
-out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+
+ return ret;
}
static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
- int old_adv, adv = 0, cap, ret;
+ int ret;
+
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
pm_runtime_get_noresume(d);
- if (!dev->phydev || !pm_runtime_active(d)) {
+ if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -2077,38 +2067,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- cap = ret;
-
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- old_adv = ret;
-
- if (data->eee_enabled) {
- adv = !data->advertised ? cap :
- ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
- /* Mask prohibited EEE modes */
- adv &= ~dev->phydev->eee_broken_modes;
- }
-
- if (old_adv != adv) {
- ret = rtl_set_eee_adv(tp, adv);
- if (ret < 0)
- goto out;
-
- /* Restart autonegotiation so the new modes get sent to the
- * link partner.
- */
- ret = phy_restart_aneg(dev->phydev);
- }
-
+ ret = phy_ethtool_set_eee(tp->phydev, data);
out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+ return ret;
}
static const struct ethtool_ops rtl8169_ethtool_ops = {
@@ -2135,10 +2097,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
- int supported = rtl_get_eee_supp(tp);
+ struct phy_device *phydev = tp->phydev;
+ int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
if (supported > 0)
- rtl_set_eee_adv(tp, supported);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2159,6 +2122,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
u16 val;
u16 mac_version;
} mac_info[] = {
+ /* 8125 family. */
+ { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
+ { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
{ 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
@@ -2304,6 +2271,12 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}
+static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
+{
+ r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
+}
+
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
@@ -2324,6 +2297,26 @@ static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
+static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ rtl8168g_config_eee_phy(tp);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ rtl8168h_config_eee_phy(tp);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3391,7 +3384,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
+ rtl8168h_config_eee_phy(tp);
rtl_enable_eee(tp);
}
@@ -3644,6 +3637,134 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
+static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
+ phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
+ phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
+ phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x80ea);
+ phy_modify(phydev, 0x14, 0xff00, 0xc400);
+ phy_write(phydev, 0x13, 0x80eb);
+ phy_modify(phydev, 0x14, 0x0700, 0x0300);
+ phy_write(phydev, 0x13, 0x80f8);
+ phy_modify(phydev, 0x14, 0xff00, 0x1c00);
+ phy_write(phydev, 0x13, 0x80f1);
+ phy_modify(phydev, 0x14, 0xff00, 0x3000);
+ phy_write(phydev, 0x13, 0x80fe);
+ phy_modify(phydev, 0x14, 0xff00, 0xa500);
+ phy_write(phydev, 0x13, 0x8102);
+ phy_modify(phydev, 0x14, 0xff00, 0x5000);
+ phy_write(phydev, 0x13, 0x8105);
+ phy_modify(phydev, 0x14, 0xff00, 0x3300);
+ phy_write(phydev, 0x13, 0x8100);
+ phy_modify(phydev, 0x14, 0xff00, 0x7000);
+ phy_write(phydev, 0x13, 0x8104);
+ phy_modify(phydev, 0x14, 0xff00, 0xf000);
+ phy_write(phydev, 0x13, 0x8106);
+ phy_modify(phydev, 0x14, 0xff00, 0x6500);
+ phy_write(phydev, 0x13, 0x80dc);
+ phy_modify(phydev, 0x14, 0xff00, 0xed00);
+ phy_write(phydev, 0x13, 0x80df);
+ phy_set_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x13, 0x80e1);
+ phy_clear_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
+ phy_write_paged(phydev, 0xa43, 0x13, 0x819f);
+ phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6);
+
+ phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
+ phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
+ phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
+static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int i;
+
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
+ phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
+ phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
+ phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
+ phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
+ phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x80a2);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x16, 0x809c);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81B3);
+ phy_write(phydev, 0x14, 0x0043);
+ phy_write(phydev, 0x14, 0x00A7);
+ phy_write(phydev, 0x14, 0x00D6);
+ phy_write(phydev, 0x14, 0x00EC);
+ phy_write(phydev, 0x14, 0x00F6);
+ phy_write(phydev, 0x14, 0x00FB);
+ phy_write(phydev, 0x14, 0x00FD);
+ phy_write(phydev, 0x14, 0x00FF);
+ phy_write(phydev, 0x14, 0x00BB);
+ phy_write(phydev, 0x14, 0x0058);
+ phy_write(phydev, 0x14, 0x0029);
+ phy_write(phydev, 0x14, 0x0013);
+ phy_write(phydev, 0x14, 0x0009);
+ phy_write(phydev, 0x14, 0x0004);
+ phy_write(phydev, 0x14, 0x0002);
+ for (i = 0; i < 25; i++)
+ phy_write(phydev, 0x14, 0x0000);
+
+ phy_write(phydev, 0x13, 0x8257);
+ phy_write(phydev, 0x14, 0x020F);
+
+ phy_write(phydev, 0x13, 0x80EA);
+ phy_write(phydev, 0x14, 0x7843);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl_apply_firmware(tp);
+
+ phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81a2);
+ phy_set_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
+ phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
static const rtl_generic_fct phy_configs[] = {
@@ -3699,6 +3820,8 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
};
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3826,6 +3949,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
case RTL_GIGA_MAC_VER_40:
@@ -3855,6 +3980,8 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
break;
case RTL_GIGA_MAC_VER_40:
@@ -3887,6 +4014,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
+ RX_DMA_BURST);
+ break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
@@ -4146,54 +4277,46 @@ static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_versi
static void rtl_set_rx_mode(struct net_device *dev)
{
+ u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
+ /* Multicast hash filter */
+ u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
struct rtl8169_private *tp = netdev_priv(dev);
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
+ u32 tmp;
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
+ rx_mode |= AcceptAllPhys;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ /* accept all multicasts */
+ } else if (netdev_mc_empty(dev)) {
+ rx_mode &= ~AcceptMulticast;
} else {
struct netdev_hw_addr *ha;
- rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
+ u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
+ }
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ tmp = mc_filter[0];
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(tmp);
}
}
if (dev->features & NETIF_F_RXALL)
rx_mode |= (AcceptErr | AcceptRunt);
- tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
-
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_35)
- mc_filter[1] = mc_filter[0] = 0xffffffff;
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
- RTL_W32(tp, RxConfig, tmp);
+ tmp = RTL_R32(tp, RxConfig);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
}
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -4407,7 +4530,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
- { 0x03, 0x0400, 0x0220 }
+ { 0x03, 0x0400, 0x0020 }
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4454,7 +4577,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168d_4[] = {
{ 0x0b, 0x0000, 0x0048 },
{ 0x19, 0x0020, 0x0050 },
- { 0x0c, 0x0100, 0x0020 }
+ { 0x0c, 0x0100, 0x0020 },
+ { 0x10, 0x0004, 0x0000 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4504,7 +4628,9 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168e_2[] = {
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4566,7 +4692,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{ 0x06, 0x00c0, 0x0020 },
{ 0x08, 0x0001, 0x0002 },
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4581,8 +4709,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
- { 0x1e, 0x0000, 0x4000 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4621,8 +4750,8 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_1[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x37d0, 0x0820 },
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
{ 0x1e, 0x0000, 0x0001 },
{ 0x19, 0x8000, 0x0000 }
};
@@ -4638,10 +4767,15 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20eb }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
+ { 0x19, 0xffff, 0x7c00 },
+ { 0x1e, 0xffff, 0x20eb },
+ { 0x0d, 0xffff, 0x1666 },
+ { 0x00, 0xffff, 0x10a3 },
+ { 0x06, 0xffff, 0xf050 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x4000, 0x0000 },
};
rtl_hw_start_8168g(tp);
@@ -4655,11 +4789,16 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8411_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x0f, 0xffff, 0x5200 },
- { 0x19, 0x0020, 0x0000 },
- { 0x1e, 0x0000, 0x2000 }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x37d0, 0x0820 },
+ { 0x1e, 0x0000, 0x0001 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x00, 0x0000, 0x0080 },
+ { 0x06, 0x0000, 0x0010 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x0000, 0x4000 },
};
rtl_hw_start_8168g(tp);
@@ -4809,16 +4948,15 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
- int rg_saw_cnt;
- u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
{ 0x1d, 0x0000, 0x0800 },
{ 0x05, 0xffff, 0x2089 },
{ 0x06, 0xffff, 0x5881 },
- { 0x04, 0xffff, 0x154a },
+ { 0x04, 0xffff, 0x854a },
{ 0x01, 0xffff, 0x068b }
};
+ int rg_saw_cnt;
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
@@ -4863,31 +5001,13 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
sw_cnt_1ms_ini &= 0x0fff;
- data = r8168_mac_ocp_read(tp, 0xd412);
- data &= ~0x0fff;
- data |= sw_cnt_1ms_ini;
- r8168_mac_ocp_write(tp, 0xd412, data);
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- data = r8168_mac_ocp_read(tp, 0xe056);
- data &= ~0xf0;
- data |= 0x70;
- r8168_mac_ocp_write(tp, 0xe056, data);
-
- data = r8168_mac_ocp_read(tp, 0xe052);
- data &= ~0x6000;
- data |= 0x8008;
- r8168_mac_ocp_write(tp, 0xe052, data);
-
- data = r8168_mac_ocp_read(tp, 0xe0d6);
- data &= ~0x01ff;
- data |= 0x017f;
- r8168_mac_ocp_write(tp, 0xe0d6, data);
-
- data = r8168_mac_ocp_read(tp, 0xd420);
- data &= ~0x0fff;
- data |= 0x047f;
- r8168_mac_ocp_write(tp, 0xd420, data);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
+ r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
@@ -4969,12 +5089,11 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
- u32 data;
static const struct ephy_info e_info_8168ep_3[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0x7c00 },
- { 0x1e, 0xffff, 0x20eb },
- { 0x0d, 0xffff, 0x1666 }
+ { 0x00, 0x0000, 0x0080 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
};
/* disable aspm and clock request before access ephy */
@@ -4986,18 +5105,9 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
- data = r8168_mac_ocp_read(tp, 0xd3e2);
- data &= 0xf000;
- data |= 0x0271;
- r8168_mac_ocp_write(tp, 0xd3e2, data);
-
- data = r8168_mac_ocp_read(tp, 0xd3e4);
- data &= 0xff00;
- r8168_mac_ocp_write(tp, 0xd3e4, data);
-
- data = r8168_mac_ocp_read(tp, 0xe860);
- data |= 0x0080;
- r8168_mac_ocp_write(tp, 0xe860, data);
+ r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
+ r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5125,6 +5235,128 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
+{
+ return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
+}
+
+static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
+{
+ rtl_pcie_state_l2l3_disable(tp);
+
+ RTL_W16(tp, 0x382, 0x221b);
+ RTL_W8(tp, 0x4500, 0);
+ RTL_W16(tp, 0x4800, 0);
+
+ /* disable UPS */
+ r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
+
+ RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
+
+ r8168_mac_ocp_write(tp, 0xc140, 0xffff);
+ r8168_mac_ocp_write(tp, 0xc142, 0xffff);
+
+ r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
+ r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
+
+ /* disable new tx descriptor format */
+ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
+
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
+ r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
+ r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
+ r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067);
+ r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00);
+ r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
+ r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0);
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
+ udelay(1);
+ r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
+ RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
+
+ r8168_mac_ocp_write(tp, 0xe098, 0xc302);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+
+ rtl8125_config_eee_mac(tp);
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ udelay(10);
+}
+
+static void rtl_hw_start_8125_1(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8125_1[] = {
+ { 0x01, 0xffff, 0xa812 },
+ { 0x09, 0xffff, 0x520c },
+ { 0x04, 0xffff, 0xd000 },
+ { 0x0d, 0xffff, 0xf702 },
+ { 0x0a, 0xffff, 0x8653 },
+ { 0x06, 0xffff, 0x001e },
+ { 0x08, 0xffff, 0x3595 },
+ { 0x20, 0xffff, 0x9455 },
+ { 0x21, 0xffff, 0x99ff },
+ { 0x02, 0xffff, 0x6046 },
+ { 0x29, 0xffff, 0xfe00 },
+ { 0x23, 0xffff, 0xab62 },
+
+ { 0x41, 0xffff, 0xa80c },
+ { 0x49, 0xffff, 0x520c },
+ { 0x44, 0xffff, 0xd000 },
+ { 0x4d, 0xffff, 0xf702 },
+ { 0x4a, 0xffff, 0x8653 },
+ { 0x46, 0xffff, 0x001e },
+ { 0x48, 0xffff, 0x3595 },
+ { 0x60, 0xffff, 0x9455 },
+ { 0x61, 0xffff, 0x99ff },
+ { 0x42, 0xffff, 0x6046 },
+ { 0x69, 0xffff, 0xfe00 },
+ { 0x63, 0xffff, 0xab62 },
+ };
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8125_1);
+
+ rtl_hw_start_8125_common(tp);
+}
+
+static void rtl_hw_start_8125_2(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8125_2[] = {
+ { 0x04, 0xffff, 0xd000 },
+ { 0x0a, 0xffff, 0x8653 },
+ { 0x23, 0xffff, 0xab66 },
+ { 0x20, 0xffff, 0x9455 },
+ { 0x21, 0xffff, 0x99ff },
+ { 0x29, 0xffff, 0xfe04 },
+
+ { 0x44, 0xffff, 0xd000 },
+ { 0x4a, 0xffff, 0x8653 },
+ { 0x63, 0xffff, 0xab66 },
+ { 0x60, 0xffff, 0x9455 },
+ { 0x61, 0xffff, 0x99ff },
+ { 0x69, 0xffff, 0xfe04 },
+ };
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8125_2);
+
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -5173,12 +5405,25 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
+ [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};
if (hw_configs[tp->mac_version])
hw_configs[tp->mac_version](tp);
}
+static void rtl_hw_start_8125(struct rtl8169_private *tp)
+{
+ int i;
+
+ /* disable interrupt coalescing */
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+
+ rtl_hw_config(tp);
+}
+
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
@@ -5192,6 +5437,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_hw_config(tp);
+
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
}
static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5215,6 +5463,9 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
RTL_W32(tp, RxMissed, 0);
+
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
}
static void rtl_hw_start(struct rtl8169_private *tp)
@@ -5226,6 +5477,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
+ else if (rtl_is_8125(tp))
+ rtl_hw_start_8125(tp);
else
rtl_hw_start_8168(tp);
@@ -5233,17 +5486,12 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
- /* disable interrupt coalescing */
- RTL_W16(tp, IntrMitigate, 0x0000);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(tp, IntrMask);
+ RTL_R16(tp, CPlusCmd);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
-
rtl_set_rx_mode(tp->dev);
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
rtl_irq_enable(tp);
}
@@ -5268,17 +5516,6 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
-static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
- void **data_buff, struct RxDesc *desc)
-{
- dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
- R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
-
- kfree(*data_buff);
- *data_buff = NULL;
- rtl8169_make_unusable_by_asic(desc);
-}
-
static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
@@ -5289,49 +5526,43 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
-static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
- struct RxDesc *desc)
+static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
{
- void *data;
- dma_addr_t mapping;
struct device *d = tp_to_dev(tp);
int node = dev_to_node(d);
+ dma_addr_t mapping;
+ struct page *data;
- data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
+ data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
if (!data)
return NULL;
- /* Memory should be properly aligned, but better check. */
- if (!IS_ALIGNED((unsigned long)data, 8)) {
- netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
- goto err_out;
- }
-
- mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
- goto err_out;
+ __free_pages(data, get_order(R8169_RX_BUF_SIZE));
+ return NULL;
}
desc->addr = cpu_to_le64(mapping);
rtl8169_mark_to_asic(desc);
- return data;
-err_out:
- kfree(data);
- return NULL;
+ return data;
}
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
unsigned int i;
- for (i = 0; i < NUM_RX_DESC; i++) {
- if (tp->Rx_databuff[i]) {
- rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
- tp->RxDescArray + i);
- }
+ for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
+ dma_unmap_page(tp_to_dev(tp),
+ le64_to_cpu(tp->RxDescArray[i].addr),
+ R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
+ tp->Rx_databuff[i] = NULL;
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
}
}
@@ -5345,7 +5576,7 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
unsigned int i;
for (i = 0; i < NUM_RX_DESC; i++) {
- void *data;
+ struct page *data;
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
@@ -5507,44 +5738,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-/* r8169_csum_workaround()
- * The hw limites the value the transport offset. When the offset is out of the
- * range, calculate the checksum by sw.
- */
-static void r8169_csum_workaround(struct rtl8169_private *tp,
- struct sk_buff *skb)
-{
- if (skb_is_gso(skb)) {
- netdev_features_t features = tp->dev->features;
- struct sk_buff *segs, *nskb;
-
- features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- segs = skb_gso_segment(skb, features);
- if (IS_ERR(segs) || !segs)
- goto drop;
-
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- rtl8169_start_xmit(nskb, tp->dev);
- } while (segs);
-
- dev_consume_skb_any(skb);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_help(skb) < 0)
- goto drop;
-
- rtl8169_start_xmit(skb, tp->dev);
- } else {
-drop:
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- }
-}
-
/* msdn_giant_send_check()
* According to the document of microsoft, the TCP Pseudo Header excludes the
* packet length for IPv6 TCP large packets.
@@ -5594,13 +5787,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
u32 mss = skb_shinfo(skb)->gso_size;
if (mss) {
- if (transport_offset > GTTCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x for TSO\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[0] |= TD1_GTSENV4;
@@ -5623,16 +5809,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !(skb_checksum_help(skb) || eth_skb_pad(skb));
-
- if (transport_offset > TCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[1] |= TD1_IPv4_CS;
@@ -5686,6 +5862,14 @@ static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
}
}
+static void rtl8169_doorbell(struct rtl8169_private *tp)
+{
+ if (rtl_is_8125(tp))
+ RTL_W16(tp, TxPoll_8125, BIT(0));
+ else
+ RTL_W8(tp, TxPoll, NPQ);
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -5695,6 +5879,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
u32 opts[2], len;
+ bool stop_queue;
+ bool door_bell;
int frags;
if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -5709,10 +5895,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
opts[0] = DescOwn;
if (rtl_chip_supports_csum_v2(tp)) {
- if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
- r8169_csum_workaround(tp, skb);
- return NETDEV_TX_OK;
- }
+ if (!rtl8169_tso_csum_v2(tp, skb, opts))
+ goto err_dma_0;
} else {
rtl8169_tso_csum_v1(skb, opts);
}
@@ -5740,13 +5924,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
+ door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
+
txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
@@ -5754,14 +5938,20 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
-
- if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
smp_wmb();
netif_stop_queue(dev);
+ door_bell = true;
+ }
+
+ if (door_bell)
+ rtl8169_doorbell(tp);
+
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -5789,6 +5979,39 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
+static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ int transport_offset = skb_transport_offset(skb);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (skb_is_gso(skb)) {
+ if (transport_offset > GTTCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_ALL_TSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->len < ETH_ZLEN) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_34:
+ features &= ~NETIF_F_CSUM_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (transport_offset > TCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_CSUM_MASK;
+ }
+
+ return vlan_features_check(skb, features);
+}
+
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -5850,7 +6073,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
tp->TxDescArray + entry);
- if (status & LastFrag) {
+ if (tx_skb->skb) {
pkts_compl++;
bytes_compl += tx_skb->skb->len;
napi_consume_skb(tx_skb->skb, budget);
@@ -5888,7 +6111,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* it is slow enough). -- FR
*/
if (tp->cur_tx != dirty_tx)
- RTL_W8(tp, TxPoll, NPQ);
+ rtl8169_doorbell(tp);
}
}
@@ -5908,24 +6131,6 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
skb_checksum_none_assert(skb);
}
-static struct sk_buff *rtl8169_try_rx_copy(void *data,
- struct rtl8169_private *tp,
- int pkt_size,
- dma_addr_t addr)
-{
- struct sk_buff *skb;
- struct device *d = tp_to_dev(tp);
-
- dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
- prefetch(data);
- skb = napi_alloc_skb(&tp->napi, pkt_size);
- if (skb)
- skb_copy_to_linear_data(skb, data, pkt_size);
- dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
-
- return skb;
-}
-
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
unsigned int cur_rx, rx_left;
@@ -5935,6 +6140,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
+ const void *rx_buf = page_address(tp->Rx_databuff[entry]);
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -5961,17 +6167,13 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
goto process_pkt;
}
} else {
+ unsigned int pkt_size;
struct sk_buff *skb;
- dma_addr_t addr;
- int pkt_size;
process_pkt:
- addr = le64_to_cpu(desc->addr);
+ pkt_size = status & GENMASK(13, 0);
if (likely(!(dev->features & NETIF_F_RXFCS)))
- pkt_size = (status & 0x00003fff) - 4;
- else
- pkt_size = status & 0x00003fff;
-
+ pkt_size -= ETH_FCS_LEN;
/*
* The driver does not support incoming fragmented
* frames. They are seen as a symptom of over-mtu
@@ -5983,15 +6185,25 @@ process_pkt:
goto release_descriptor;
}
- skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
- tp, pkt_size, addr);
- if (!skb) {
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
goto release_descriptor;
}
+ dma_sync_single_for_cpu(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+ prefetch(rx_buf);
+ skb_copy_to_linear_data(skb, rx_buf, pkt_size);
+ skb->tail += pkt_size;
+ skb->len = pkt_size;
+
+ dma_sync_single_for_device(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+
rtl8169_rx_csum(skb, status);
- skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_vlan_tag(desc, skb);
@@ -6020,9 +6232,10 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- u16 status = RTL_R16(tp, IntrStatus);
+ u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
+ if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
+ !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6332,7 +6545,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast = dev->stats.multicast;
/*
- * Fetch additonal counter values missing in stats collected by driver
+ * Fetch additional counter values missing in stats collected by driver
* from tally counters.
*/
if (pm_runtime_active(&pdev->dev))
@@ -6556,6 +6769,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_stop = rtl8169_close,
.ndo_get_stats64 = rtl8169_get_stats64,
.ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_features_check = rtl8169_features_check,
.ndo_tx_timeout = rtl8169_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = rtl8169_change_mtu,
@@ -6619,6 +6833,8 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
value = rtl_eri_read(tp, 0xe4);
mac_addr[4] = (value >> 0) & 0xff;
mac_addr[5] = (value >> 8) & 0xff;
+ } else if (rtl_is_8125(tp)) {
+ rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
}
@@ -6692,8 +6908,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- u32 data;
-
tp->ocp_base = OCP_STD_PHY_BASE;
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -6708,16 +6922,37 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data &= ~(1 << 14);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
return;
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data |= (1 << 15);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
+
+ rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+}
+
+static void rtl_hw_init_8125(struct rtl8169_private *tp)
+{
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
+
+ r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
+ r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
+ r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
@@ -6731,6 +6966,9 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_hw_init_8125(tp);
+ break;
default:
break;
}
@@ -6794,7 +7032,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
u8 *mac_addr = dev->dev_addr;
- int rc, i;
+ int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
if (!rc)
@@ -6804,8 +7042,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
if (is_valid_ether_addr(mac_addr))
goto done;
- for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = RTL_R8(tp, MAC0 + i);
+ rtl_read_mac_from_reg(tp, mac_addr, MAC0);
if (is_valid_ether_addr(mac_addr))
goto done;
@@ -6917,11 +7154,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -6929,8 +7164,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tp->cp_cmd |= RxChkSum | RxVlan;
-
+ tp->cp_cmd |= RxChkSum;
+ /* RTL8125 uses register RxConfig for VLAN offloading config */
+ if (!rtl_is_8125(tp))
+ tp->cp_cmd |= RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -6939,8 +7176,22 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (rtl_chip_supports_csum_v2(tp))
+ if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ } else {
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ }
+
+ /* RTL8168e-vl has a HW issue with TSO */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
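
For context on the feature changes above (not part of the patch): dev->hw_features is the set ethtool may toggle, dev->features is what is enabled at probe time, and dev->vlan_features is inherited by stacked VLAN devices, which is why the RTL8168e-vl quirk clears TSO and SG from all three masks so the offloads neither stay on nor can be re-enabled. A schematic sketch of that convention:

static void sketch_setup_features(struct net_device *dev, bool tso_is_broken)
{
	/* togglable via ethtool -K ... */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	/* enabled by default at probe */
	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	if (tso_is_broken) {
		/* remove from every mask so the offload cannot come back */
		dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
		dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
		dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
	}
}
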
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index d2c48116f181..2412c87561e0 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -78,7 +78,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
{
int ret;
int i, chan;
- struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *addr;
struct sxgbe_priv_data *priv = NULL;
@@ -88,8 +87,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
/* Get memory resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
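
devm_platform_ioremap_resource(pdev, index) is just the platform_get_resource() plus devm_ioremap_resource() pair folded into one call, which is why the local struct resource * can be dropped. A sketch of the typical probe usage:

static int sketch_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* map the first IORESOURCE_MEM region, devres-managed */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... continue with clocks, IRQs, netdev registration ... */
	return 0;
}
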
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 16d6952c312a..0ec13f520e90 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -508,7 +508,7 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
@@ -520,7 +520,7 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
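
The substitution in these sfc hunks is safe because pci_set_drvdata()/pci_get_drvdata() are thin wrappers around dev_set_drvdata()/dev_get_drvdata() on the embedded struct device, so a sysfs callback that already receives the struct device can skip the to_pci_dev() detour. Sketch of the pattern:

static ssize_t sketch_show_attr(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* same pointer that pci_get_drvdata(to_pci_dev(dev)) returned */
	struct efx_nic *efx = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", efx->phy_type);
}
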
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ab58b837df47..2fef7402233e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2517,7 +2517,7 @@ static struct notifier_block efx_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2526,7 +2526,7 @@ static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
@@ -2534,7 +2534,7 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool enable = count > 0 && *buf != '0';
@@ -3654,7 +3654,7 @@ static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int efx_pm_freeze(struct device *dev)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3675,7 +3675,7 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
int rc;
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 9b15c39ac670..eecc348b1c32 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2256,7 +2256,7 @@ static struct notifier_block ef4_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2999,7 +2999,7 @@ static int ef4_pci_probe(struct pci_dev *pci_dev,
static int ef4_pm_freeze(struct device *dev)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3020,7 +3020,7 @@ static int ef4_pm_freeze(struct device *dev)
static int ef4_pm_thaw(struct device *dev)
{
int rc;
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 839189dab98e..605f486fa675 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -357,7 +357,7 @@ fail_on:
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
@@ -365,7 +365,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
enum ef4_phy_mode old_mode, new_mode;
int err;
@@ -454,13 +454,13 @@ static int sfe4001_init(struct ef4_nic *efx)
#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
- i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+ i2c_new_client_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
board->hwmon_client =
- i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+ i2c_new_dummy_device(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
- if (!board->hwmon_client)
- return -EIO;
+ if (IS_ERR(board->hwmon_client))
+ return PTR_ERR(board->hwmon_client);
/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
rc = i2c_smbus_write_byte_data(board->hwmon_client,
@@ -468,9 +468,9 @@ static int sfe4001_init(struct ef4_nic *efx)
if (rc)
goto fail_hwmon;
- board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
- if (!board->ioexp_client) {
- rc = -EIO;
+ board->ioexp_client = i2c_new_dummy_device(&board->i2c_adap, PCA9539);
+ if (IS_ERR(board->ioexp_client)) {
+ rc = PTR_ERR(board->ioexp_client);
goto fail_hwmon;
}
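
i2c_new_dummy_device() and i2c_new_client_device() return an ERR_PTR() on failure rather than NULL, which is why the checks above switch from !ptr to IS_ERR()/PTR_ERR() and can propagate the real error code instead of a blanket -EIO. A sketch of the calling convention:

static int sketch_attach_ioexp(struct i2c_adapter *adap, unsigned short addr)
{
	struct i2c_client *client;

	client = i2c_new_dummy_device(adap, addr);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* e.g. -ENOMEM or -EBUSY */

	/* ... store and use the client ... */
	return 0;
}
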
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index fd850d3d8ec0..05ea3523890a 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -424,7 +424,6 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -460,9 +459,7 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d5db045535d3..85ec07f5a674 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -412,7 +412,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -449,9 +448,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 31ec56091a5d..65e81ec1b314 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
- efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
kunmap_atomic(vaddr);
}
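
skb_frag_off(), together with skb_frag_page() and skb_frag_size(), hides the skb_frag_t layout so drivers stop reading f->page_offset directly. A sketch of a fragment copy written only with those accessors:

static void sketch_copy_frag(void *dst, const skb_frag_t *frag)
{
	void *vaddr = kmap_atomic(skb_frag_page(frag));

	/* offset and length come from accessors, never from struct fields */
	memcpy(dst, vaddr + skb_frag_off(frag), skb_frag_size(frag));
	kunmap_atomic(vaddr);
}
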
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 358e66b81926..deb636d653f3 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1,9 +1,5 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
*
* Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
* Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
@@ -15,11 +11,8 @@
*
* To do:
*
- * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
- * o Handle allocation failures in ioc3_init_rings().
* o Use prefetching for large packets. What is a good lower limit for
* prefetching?
- * o We're probably allocating a bit too much memory.
* o Use hardware checksums.
* o Convert to using a IOC3 meta driver.
* o Which PHYs might possibly be attached to the IOC3 in real live,
@@ -39,10 +32,10 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#ifdef CONFIG_SERIAL_8250
@@ -55,32 +48,52 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/dma-direct.h>
+
#include <net/ip.h>
#include <asm/byteorder.h>
-#include <asm/io.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
-/*
- * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
- * value must be a power of two.
+/* Number of RX buffers. This is tunable in the range of 16 <= x < 512.
+ * The value must be a power of two.
*/
-#define RX_BUFFS 64
+#define RX_BUFFS 64
+#define RX_RING_ENTRIES 512 /* fixed in hardware */
+#define RX_RING_MASK (RX_RING_ENTRIES - 1)
+#define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64))
+
+/* 128 TX buffers (not tunable) */
+#define TX_RING_ENTRIES 128
+#define TX_RING_MASK (TX_RING_ENTRIES - 1)
+#define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd))
+
+/* IOC3 does dma transfers in 128 byte blocks */
+#define IOC3_DMA_XFER_LEN 128UL
+
+/* Every RX buffer starts with 8 byte descriptor data */
+#define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
+#define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN)
-#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
-#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)
/* Private per NIC data of the driver. */
struct ioc3_private {
- struct ioc3 *regs;
+ struct ioc3_ethregs *regs;
+ struct ioc3 *all_regs;
+ struct device *dma_dev;
+ u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
struct ioc3_etxd *txr;
- struct sk_buff *rx_skbs[512];
- struct sk_buff *tx_skbs[128];
+ dma_addr_t rxr_dma;
+ dma_addr_t txr_dma;
+ struct sk_buff *rx_skbs[RX_RING_ENTRIES];
+ struct sk_buff *tx_skbs[TX_RING_ENTRIES];
int rx_ci; /* RX consumer index */
int rx_pi; /* RX producer index */
int tx_ci; /* TX consumer index */
@@ -102,190 +115,138 @@ static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
+static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
+static int ioc3_alloc_rx_bufs(struct net_device *dev);
+static void ioc3_free_rx_bufs(struct ioc3_private *ip);
+static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
-/* We use this to acquire receive skb's that we can DMA directly into. */
-
-#define IOC3_CACHELINE 128UL
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
- return (~addr + 1) & (IOC3_CACHELINE - 1UL);
+ return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}
-static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
- unsigned int gfp_mask)
+static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
+ struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
- struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ dma_addr_t d;
+ int offset;
+
+ new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
+ offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
+ if (offset)
+ skb_reserve(new_skb, offset);
- skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
- if (likely(skb)) {
- int offset = aligned_rx_skb_addr((unsigned long) skb->data);
- if (offset)
- skb_reserve(skb, offset);
+ d = dma_map_single(ip->dma_dev, new_skb->data,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(ip->dma_dev, d)) {
+ dev_kfree_skb_any(new_skb);
+ return -ENOMEM;
}
+ *rxb_dma = d;
+ *rxb = (struct ioc3_erxbuf *)new_skb->data;
+ skb_reserve(new_skb, RX_OFFSET);
+ *skb = new_skb;
- return skb;
+ return 0;
}
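
The rewritten ioc3_alloc_skb() above reserves aligned_rx_skb_addr() bytes so the DMA-mapped buffer starts on an IOC3_DMA_XFER_LEN (128 byte) boundary; (~addr + 1) & (len - 1) is simply (-addr) modulo len. A standalone, user-space check of that identity (names here are illustrative only):

#include <assert.h>

#define XFER_LEN 128UL

static unsigned long pad_to_boundary(unsigned long addr)
{
	return (~addr + 1) & (XFER_LEN - 1UL);	/* bytes until next 128B boundary */
}

int main(void)
{
	assert(pad_to_boundary(0x1000) == 0);	/* already aligned */
	assert(pad_to_boundary(0x1001) == 127);	/* 0x1001 + 127 == 0x1080 */
	assert(pad_to_boundary(0x10fc) == 4);	/* 0x10fc + 4 == 0x1100 */
	return 0;
}
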
-static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
+#ifdef CONFIG_PCI_XTALK_BRIDGE
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
-#ifdef CONFIG_SGI_IP27
- vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
+ return (addr & ~PCI64_ATTR_BAR) | attr;
+}
- return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
- ((unsigned long)ptr & TO_PHYS_MASK);
+#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
- return virt_to_bus(ptr);
-#endif
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
+{
+ return addr;
}
-/* BEWARE: The IOC3 documentation documents the size of rx buffers as
- 1644 while it's actually 1664. This one was nasty to track down ... */
-#define RX_OFFSET 10
-#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
-
-/* DMA barrier to separate cached and uncached accesses. */
-#define BARRIER() \
- __asm__("sync" ::: "memory")
-
+#define ERBAR_VAL 0
+#endif
#define IOC3_SIZE 0x100000
-/*
- * IOC3 is a big endian device
- *
- * Unorthodox but makes the users of these macros more readable - the pointer
- * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3
- * in the environment.
- */
-#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
-#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
-#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
-#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
-#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
-#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
-#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
-#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
-#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
-#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
-#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
-#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
-#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
-#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
-#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
-#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
-#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
-#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
-#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
-#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
-#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
-#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
-#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
-#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
-#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
-#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
-#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
-#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
-#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
-#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
-#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
-#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
-#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
-#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
-#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
-#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
-#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
-#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
-#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
-#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
-#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
-#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
-
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
return (pulse << 10) | (sample << 2);
}
-static int nic_wait(struct ioc3 *ioc3)
+static int nic_wait(u32 __iomem *mcr)
{
- u32 mcr;
+ u32 m;
- do {
- mcr = ioc3_r_mcr();
- } while (!(mcr & 2));
+ do {
+ m = readl(mcr);
+ } while (!(m & 2));
- return mcr & 1;
+ return m & 1;
}
-static int nic_reset(struct ioc3 *ioc3)
+static int nic_reset(u32 __iomem *mcr)
{
- int presence;
+ int presence;
- ioc3_w_mcr(mcr_pack(500, 65));
- presence = nic_wait(ioc3);
+ writel(mcr_pack(500, 65), mcr);
+ presence = nic_wait(mcr);
- ioc3_w_mcr(mcr_pack(0, 500));
- nic_wait(ioc3);
+ writel(mcr_pack(0, 500), mcr);
+ nic_wait(mcr);
- return presence;
+ return presence;
}
-static inline int nic_read_bit(struct ioc3 *ioc3)
+static inline int nic_read_bit(u32 __iomem *mcr)
{
int result;
- ioc3_w_mcr(mcr_pack(6, 13));
- result = nic_wait(ioc3);
- ioc3_w_mcr(mcr_pack(0, 100));
- nic_wait(ioc3);
+ writel(mcr_pack(6, 13), mcr);
+ result = nic_wait(mcr);
+ writel(mcr_pack(0, 100), mcr);
+ nic_wait(mcr);
return result;
}
-static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
+static inline void nic_write_bit(u32 __iomem *mcr, int bit)
{
if (bit)
- ioc3_w_mcr(mcr_pack(6, 110));
+ writel(mcr_pack(6, 110), mcr);
else
- ioc3_w_mcr(mcr_pack(80, 30));
+ writel(mcr_pack(80, 30), mcr);
- nic_wait(ioc3);
+ nic_wait(mcr);
}
-/*
- * Read a byte from an iButton device
+/* Read a byte from an iButton device
*/
-static u32 nic_read_byte(struct ioc3 *ioc3)
+static u32 nic_read_byte(u32 __iomem *mcr)
{
u32 result = 0;
int i;
for (i = 0; i < 8; i++)
- result = (result >> 1) | (nic_read_bit(ioc3) << 7);
+ result = (result >> 1) | (nic_read_bit(mcr) << 7);
return result;
}
-/*
- * Write a byte to an iButton device
+/* Write a byte to an iButton device
*/
-static void nic_write_byte(struct ioc3 *ioc3, int byte)
+static void nic_write_byte(u32 __iomem *mcr, int byte)
{
int i, bit;
@@ -293,26 +254,26 @@ static void nic_write_byte(struct ioc3 *ioc3, int byte)
bit = byte & 1;
byte >>= 1;
- nic_write_bit(ioc3, bit);
+ nic_write_bit(mcr, bit);
}
}
-static u64 nic_find(struct ioc3 *ioc3, int *last)
+static u64 nic_find(u32 __iomem *mcr, int *last)
{
int a, b, index, disc;
u64 address = 0;
- nic_reset(ioc3);
+ nic_reset(mcr);
/* Search ROM. */
- nic_write_byte(ioc3, 0xf0);
+ nic_write_byte(mcr, 0xf0);
/* Algorithm from ``Book of iButton Standards''. */
for (index = 0, disc = 0; index < 64; index++) {
- a = nic_read_bit(ioc3);
- b = nic_read_bit(ioc3);
+ a = nic_read_bit(mcr);
+ b = nic_read_bit(mcr);
if (a && b) {
- printk("NIC search failed (not fatal).\n");
+ pr_warn("NIC search failed (not fatal).\n");
*last = 0;
return 0;
}
@@ -323,16 +284,17 @@ static u64 nic_find(struct ioc3 *ioc3, int *last)
} else if (index > *last) {
address &= ~(1UL << index);
disc = index;
- } else if ((address & (1UL << index)) == 0)
+ } else if ((address & (1UL << index)) == 0) {
disc = index;
- nic_write_bit(ioc3, address & (1UL << index));
+ }
+ nic_write_bit(mcr, address & (1UL << index));
continue;
} else {
if (a)
address |= 1UL << index;
else
address &= ~(1UL << index);
- nic_write_bit(ioc3, a);
+ nic_write_bit(mcr, a);
continue;
}
}
@@ -342,7 +304,7 @@ static u64 nic_find(struct ioc3 *ioc3, int *last)
return address;
}
-static int nic_init(struct ioc3 *ioc3)
+static int nic_init(u32 __iomem *mcr)
{
const char *unknown = "unknown";
const char *type = unknown;
@@ -352,7 +314,8 @@ static int nic_init(struct ioc3 *ioc3)
while (1) {
u64 reg;
- reg = nic_find(ioc3, &save);
+
+ reg = nic_find(mcr, &save);
switch (reg & 0xff) {
case 0x91:
@@ -366,12 +329,12 @@ static int nic_init(struct ioc3 *ioc3)
continue;
}
- nic_reset(ioc3);
+ nic_reset(mcr);
/* Match ROM. */
- nic_write_byte(ioc3, 0x55);
+ nic_write_byte(mcr, 0x55);
for (i = 0; i < 8; i++)
- nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
+ nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);
reg >>= 8; /* Shift out type. */
for (i = 0; i < 6; i++) {
@@ -382,52 +345,50 @@ static int nic_init(struct ioc3 *ioc3)
break;
}
- printk("Found %s NIC", type);
+ pr_info("Found %s NIC", type);
if (type != unknown)
- printk (" registration number %pM, CRC %02x", serial, crc);
- printk(".\n");
+ pr_cont(" registration number %pM, CRC %02x", serial, crc);
+ pr_cont(".\n");
return 0;
}
-/*
- * Read the NIC (Number-In-a-Can) device used to store the MAC address on
+/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
* SN0 / SN00 nodeboards and PCI cards.
*/
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
- u8 nic[14];
+ u32 __iomem *mcr = &ip->all_regs->mcr;
int tries = 2; /* There may be some problem with the battery? */
+ u8 nic[14];
int i;
- ioc3_w_gpcr_s(1 << 21);
+ writel(1 << 21, &ip->all_regs->gpcr_s);
while (tries--) {
- if (!nic_init(ioc3))
+ if (!nic_init(mcr))
break;
udelay(500);
}
if (tries < 0) {
- printk("Failed to read MAC address\n");
+ pr_err("Failed to read MAC address\n");
return;
}
/* Read Memory. */
- nic_write_byte(ioc3, 0xf0);
- nic_write_byte(ioc3, 0x00);
- nic_write_byte(ioc3, 0x00);
+ nic_write_byte(mcr, 0xf0);
+ nic_write_byte(mcr, 0x00);
+ nic_write_byte(mcr, 0x00);
for (i = 13; i >= 0; i--)
- nic[i] = nic_read_byte(ioc3);
+ nic[i] = nic_read_byte(mcr);
for (i = 2; i < 8; i++)
ip->dev->dev_addr[i - 2] = nic[i];
}
-/*
- * Ok, this is hosed by design. It's necessary to know what machine the
+/* Ok, this is hosed by design. It's necessary to know what machine the
* NIC is in in order to know how to read the NIC address. We also have
* to know if it's a PCI card or a NIC in on the node board ...
*/
@@ -435,17 +396,21 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
{
ioc3_get_eaddr_nic(ip);
- printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
+ pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
}
static void __ioc3_set_mac_address(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
- ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
- (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
+ writel((dev->dev_addr[5] << 8) |
+ dev->dev_addr[4],
+ &ip->regs->emar_h);
+ writel((dev->dev_addr[3] << 24) |
+ (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[1] << 8) |
+ dev->dev_addr[0],
+ &ip->regs->emar_l);
}
static int ioc3_set_mac_address(struct net_device *dev, void *addr)
@@ -462,31 +427,35 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-/*
- * Caller must hold the ioc3_lock ever for MII readers. This is also
+/* Caller must hold the ioc3_lock ever for MII readers. This is also
* used to protect the transmitter side but it's low contention.
*/
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- while (ioc3_r_micr() & MICR_BUSY);
- ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
- while (ioc3_r_micr() & MICR_BUSY);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
+ writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
+ &regs->micr);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
- return ioc3_r_midr_r() & MIDR_DATA_MASK;
+ return readl(&regs->midr_r) & MIDR_DATA_MASK;
}
static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
-
- while (ioc3_r_micr() & MICR_BUSY);
- ioc3_w_midr_w(data);
- ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
- while (ioc3_r_micr() & MICR_BUSY);
+ struct ioc3_ethregs *regs = ip->regs;
+
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
+ writel(data, &regs->midr_w);
+ writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
}
static int ioc3_mii_init(struct ioc3_private *ip);
@@ -494,23 +463,22 @@ static int ioc3_mii_init(struct ioc3_private *ip);
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+ dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
return &dev->stats;
}
-static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
+static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
struct ethhdr *eh = eth_hdr(skb);
- uint32_t csum, ehsum;
unsigned int proto;
- struct iphdr *ih;
- uint16_t *ew;
unsigned char *cp;
+ struct iphdr *ih;
+ u32 csum, ehsum;
+ u16 *ew;
- /*
- * Did hardware handle the checksum at all? The cases we can handle
+ /* Did hardware handle the checksum at all? The cases we can handle
* are:
*
* - TCP and UDP checksums of IPv4 only.
@@ -526,7 +494,7 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
if (eh->h_proto != htons(ETH_P_IP))
return;
- ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
+ ih = (struct iphdr *)((char *)eh + ETH_HLEN);
if (ip_is_fragment(ih))
return;
@@ -537,12 +505,12 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
/* Same as tx - compute csum of pseudo header */
csum = hwsum +
(ih->tot_len - (ih->ihl << 2)) +
- htons((uint16_t)ih->protocol) +
+ htons((u16)ih->protocol) +
(ih->saddr >> 16) + (ih->saddr & 0xffff) +
(ih->daddr >> 16) + (ih->daddr & 0xffff);
/* Sum up ethernet dest addr, src addr and protocol */
- ew = (uint16_t *) eh;
+ ew = (u16 *)eh;
ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
@@ -551,14 +519,15 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
csum += 0xffff ^ ehsum;
/* In the next step we also subtract the 1's complement
- checksum of the trailing ethernet CRC. */
+ * checksum of the trailing ethernet CRC.
+ */
cp = (char *)eh + len; /* points at trailing CRC */
if (len & 1) {
- csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
- csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
+ csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
+ csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
} else {
- csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
- csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
+ csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
+ csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
}
csum = (csum & 0xffff) + (csum >> 16);
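
Both branches above feed a 32-bit one's-complement accumulator that is then folded back to 16 bits; the driver does the fold inline with two rounds of (sum & 0xffff) + (sum >> 16). The same step as a small stand-alone helper:

static u16 sketch_csum_fold32(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the possible carry */
	return (u16)sum;
}
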
@@ -572,10 +541,10 @@ static inline void ioc3_rx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct sk_buff *skb, *new_skb;
- struct ioc3 *ioc3 = ip->regs;
int rx_entry, n_entry, len;
struct ioc3_erxbuf *rxb;
unsigned long *rxr;
+ dma_addr_t d;
u32 w0, err;
rxr = ip->rxr; /* Ring base */
@@ -583,64 +552,67 @@ static inline void ioc3_rx(struct net_device *dev)
n_entry = ip->rx_pi;
skb = ip->rx_skbs[rx_entry];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
while (w0 & ERXBUF_V) {
err = be32_to_cpu(rxb->err); /* It's valid ... */
if (err & ERXBUF_GOODPKT) {
len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
- skb_trim(skb, len);
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
- if (!new_skb) {
+ if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
/* Ouch, drop packet and just recycle packet
- to keep the ring filled. */
+ * to keep the ring filled.
+ */
dev->stats.rx_dropped++;
new_skb = skb;
+ d = rxr[rx_entry];
goto next;
}
if (likely(dev->features & NETIF_F_RXCSUM))
ioc3_tcpudp_checksum(skb,
- w0 & ERXBUF_IPCKSUM_MASK, len);
+ w0 & ERXBUF_IPCKSUM_MASK,
+ len);
+
+ dma_unmap_single(ip->dma_dev, rxr[rx_entry],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
netif_rx(skb);
ip->rx_skbs[rx_entry] = NULL; /* Poison */
- /* Because we reserve afterwards. */
- skb_put(new_skb, (1664 + RX_OFFSET));
- rxb = (struct ioc3_erxbuf *) new_skb->data;
- skb_reserve(new_skb, RX_OFFSET);
-
dev->stats.rx_packets++; /* Statistics */
dev->stats.rx_bytes += len;
} else {
/* The frame is invalid and the skb never
- reached the network layer so we can just
- recycle it. */
+ * reached the network layer so we can just
+ * recycle it.
+ */
new_skb = skb;
+ d = rxr[rx_entry];
dev->stats.rx_errors++;
}
if (err & ERXBUF_CRCERR) /* Statistics */
dev->stats.rx_crc_errors++;
if (err & ERXBUF_FRAMERR)
dev->stats.rx_frame_errors++;
+
next:
ip->rx_skbs[n_entry] = new_skb;
- rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
+ rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
rxb->w0 = 0; /* Clear valid flag */
- n_entry = (n_entry + 1) & 511; /* Update erpir */
+ n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */
/* Now go on to the next ring entry. */
- rx_entry = (rx_entry + 1) & 511;
+ rx_entry = (rx_entry + 1) & RX_RING_MASK;
skb = ip->rx_skbs[rx_entry];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
}
- ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
+ writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
ip->rx_pi = n_entry;
ip->rx_ci = rx_entry;
}
@@ -648,16 +620,16 @@ next:
static inline void ioc3_tx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3_ethregs *regs = ip->regs;
unsigned long packets, bytes;
- struct ioc3 *ioc3 = ip->regs;
int tx_entry, o_entry;
struct sk_buff *skb;
u32 etcir;
spin_lock(&ip->ioc3_lock);
- etcir = ioc3_r_etcir();
+ etcir = readl(&regs->etcir);
- tx_entry = (etcir >> 7) & 127;
+ tx_entry = (etcir >> 7) & TX_RING_MASK;
o_entry = ip->tx_ci;
packets = 0;
bytes = 0;
@@ -669,25 +641,24 @@ static inline void ioc3_tx(struct net_device *dev)
dev_consume_skb_irq(skb);
ip->tx_skbs[o_entry] = NULL;
- o_entry = (o_entry + 1) & 127; /* Next */
+ o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */
- etcir = ioc3_r_etcir(); /* More pkts sent? */
- tx_entry = (etcir >> 7) & 127;
+ etcir = readl(&regs->etcir); /* More pkts sent? */
+ tx_entry = (etcir >> 7) & TX_RING_MASK;
}
dev->stats.tx_packets += packets;
dev->stats.tx_bytes += bytes;
ip->txqlen -= packets;
- if (ip->txqlen < 128)
+ if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
netif_wake_queue(dev);
ip->tx_ci = o_entry;
spin_unlock(&ip->ioc3_lock);
}
-/*
- * Deal with fatal IOC3 errors. This condition might be caused by a hard or
+/* Deal with fatal IOC3 errors. This condition might be caused by a hard or
* software problems, so we should try to recover
* more gracefully if this ever happens. In theory we might be flooded
* with such error interrupts if something really goes wrong, so we might
@@ -696,25 +667,33 @@ static inline void ioc3_tx(struct net_device *dev)
static void ioc3_error(struct net_device *dev, u32 eisr)
{
struct ioc3_private *ip = netdev_priv(dev);
- unsigned char *iface = dev->name;
spin_lock(&ip->ioc3_lock);
if (eisr & EISR_RXOFLO)
- printk(KERN_ERR "%s: RX overflow.\n", iface);
+ net_err_ratelimited("%s: RX overflow.\n", dev->name);
if (eisr & EISR_RXBUFOFLO)
- printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
+ net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
if (eisr & EISR_RXMEMERR)
- printk(KERN_ERR "%s: RX PCI error.\n", iface);
+ net_err_ratelimited("%s: RX PCI error.\n", dev->name);
if (eisr & EISR_RXPARERR)
- printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
+ net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
if (eisr & EISR_TXBUFUFLO)
- printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
+ net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
if (eisr & EISR_TXMEMERR)
- printk(KERN_ERR "%s: TX PCI error.\n", iface);
+ net_err_ratelimited("%s: TX PCI error.\n", dev->name);
ioc3_stop(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ spin_unlock(&ip->ioc3_lock);
+ return;
+ }
+ ioc3_start(ip);
ioc3_mii_init(ip);
netif_wake_queue(dev);
@@ -723,45 +702,45 @@ static void ioc3_error(struct net_device *dev, u32 eisr)
}
/* The interrupt handler does all of the Rx thread work and cleans up
- after the Tx thread. */
-static irqreturn_t ioc3_interrupt(int irq, void *_dev)
+ * after the Tx thread.
+ */
+static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)_dev;
- struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
- EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
- EISR_TXEXPLICIT | EISR_TXMEMERR;
+ struct ioc3_private *ip = netdev_priv(dev_id);
+ struct ioc3_ethregs *regs = ip->regs;
u32 eisr;
- eisr = ioc3_r_eisr() & enabled;
-
- ioc3_w_eisr(eisr);
- (void) ioc3_r_eisr(); /* Flush */
+ eisr = readl(&regs->eisr);
+ writel(eisr, &regs->eisr);
+ readl(&regs->eisr); /* Flush */
if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
- EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
- ioc3_error(dev, eisr);
+ EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
+ ioc3_error(dev_id, eisr);
if (eisr & EISR_RXTIMERINT)
- ioc3_rx(dev);
+ ioc3_rx(dev_id);
if (eisr & EISR_TXEXPLICIT)
- ioc3_tx(dev);
+ ioc3_tx(dev_id);
return IRQ_HANDLED;
}
static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
+
+ spin_lock_irq(&ip->ioc3_lock);
if (ip->mii.full_duplex) {
- ioc3_w_etcsr(ETCSR_FD);
+ writel(ETCSR_FD, &regs->etcsr);
ip->emcr |= EMCR_DUPLEX;
} else {
- ioc3_w_etcsr(ETCSR_HD);
+ writel(ETCSR_HD, &regs->etcsr);
ip->emcr &= ~EMCR_DUPLEX;
}
- ioc3_w_emcr(ip->emcr);
+ writel(ip->emcr, &regs->emcr);
+
+ spin_unlock_irq(&ip->ioc3_lock);
}
static void ioc3_timer(struct timer_list *t)
@@ -772,12 +751,11 @@ static void ioc3_timer(struct timer_list *t)
mii_check_media(&ip->mii, 1, 0);
ioc3_setup_duplex(ip);
- ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
+ ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
add_timer(&ip->ioc3_timer);
}
-/*
- * Try to find a PHY. There is no apparent relation between the MII addresses
+/* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
* for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
* onboard IOC3s has the special oddity that probing doesn't seem to find it
@@ -786,8 +764,8 @@ static void ioc3_timer(struct timer_list *t)
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- int i, found = 0, res = 0;
int ioc3_phy_workaround = 1;
+ int i, found = 0, res = 0;
u16 word;
for (i = 0; i < 32; i++) {
@@ -800,9 +778,9 @@ static int ioc3_mii_init(struct ioc3_private *ip)
}
if (!found) {
- if (ioc3_phy_workaround)
+ if (ioc3_phy_workaround) {
i = 31;
- else {
+ } else {
ip->mii.phy_id = -1;
res = -ENODEV;
goto out;
@@ -817,27 +795,27 @@ out:
static void ioc3_mii_start(struct ioc3_private *ip)
{
- ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */
add_timer(&ip->ioc3_timer);
}
-static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
+static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
- struct sk_buff *skb;
- int i;
-
- for (i = ip->rx_ci; i & 15; i++) {
- ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
- ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
+ struct ioc3_etxd *desc;
+ u32 cmd, bufcnt, len;
+
+ desc = &ip->txr[entry];
+ cmd = be32_to_cpu(desc->cmd);
+ bufcnt = be32_to_cpu(desc->bufcnt);
+ if (cmd & ETXD_B1V) {
+ len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
+ dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
+ len, DMA_TO_DEVICE);
}
- ip->rx_pi &= 511;
- ip->rx_ci &= 511;
-
- for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
- struct ioc3_erxbuf *rxb;
- skb = ip->rx_skbs[i];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
- rxb->w0 = 0;
+ if (cmd & ETXD_B2V) {
+ len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
+ dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
+ len, DMA_TO_DEVICE);
}
}
@@ -846,9 +824,10 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
struct sk_buff *skb;
int i;
- for (i=0; i < 128; i++) {
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
skb = ip->tx_skbs[i];
if (skb) {
+ ioc3_tx_unmap(ip, i);
ip->tx_skbs[i] = NULL;
dev_kfree_skb_any(skb);
}
@@ -858,179 +837,137 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
ip->tx_ci = 0;
}
-static void ioc3_free_rings(struct ioc3_private *ip)
+static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
- struct sk_buff *skb;
int rx_entry, n_entry;
+ struct sk_buff *skb;
- if (ip->txr) {
- ioc3_clean_tx_ring(ip);
- free_pages((unsigned long)ip->txr, 2);
- ip->txr = NULL;
- }
-
- if (ip->rxr) {
- n_entry = ip->rx_ci;
- rx_entry = ip->rx_pi;
-
- while (n_entry != rx_entry) {
- skb = ip->rx_skbs[n_entry];
- if (skb)
- dev_kfree_skb_any(skb);
+ n_entry = ip->rx_ci;
+ rx_entry = ip->rx_pi;
- n_entry = (n_entry + 1) & 511;
+ while (n_entry != rx_entry) {
+ skb = ip->rx_skbs[n_entry];
+ if (skb) {
+ dma_unmap_single(ip->dma_dev,
+ be64_to_cpu(ip->rxr[n_entry]),
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
}
- free_page((unsigned long)ip->rxr);
- ip->rxr = NULL;
+ n_entry = (n_entry + 1) & RX_RING_MASK;
}
}
-static void ioc3_alloc_rings(struct net_device *dev)
+static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_erxbuf *rxb;
- unsigned long *rxr;
+ dma_addr_t d;
int i;
- if (ip->rxr == NULL) {
- /* Allocate and initialize rx ring. 4kb = 512 entries */
- ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = ip->rxr;
- if (!rxr)
- printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
-
- /* Now the rx buffers. The RX ring may be larger but
- we only allocate 16 buffers for now. Need to tune
- this for performance and memory later. */
- for (i = 0; i < RX_BUFFS; i++) {
- struct sk_buff *skb;
-
- skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
- if (!skb) {
- show_free_areas(0, NULL);
- continue;
- }
-
- ip->rx_skbs[i] = skb;
-
- /* Because we reserve afterwards. */
- skb_put(skb, (1664 + RX_OFFSET));
- rxb = (struct ioc3_erxbuf *) skb->data;
- rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
- skb_reserve(skb, RX_OFFSET);
- }
- ip->rx_ci = 0;
- ip->rx_pi = RX_BUFFS;
- }
+ /* Now the rx buffers. The RX ring may be larger but
+ * we only allocate 16 buffers for now. Need to tune
+ * this for performance and memory later.
+ */
+ for (i = 0; i < RX_BUFFS; i++) {
+ if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
+ return -ENOMEM;
- if (ip->txr == NULL) {
- /* Allocate and initialize tx rings. 16kb = 128 bufs. */
- ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
- if (!ip->txr)
- printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
- ip->tx_pi = 0;
- ip->tx_ci = 0;
+ rxb->w0 = 0; /* Clear valid flag */
+ ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
}
-}
-
-static void ioc3_init_rings(struct net_device *dev)
-{
- struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- unsigned long ring;
-
- ioc3_free_rings(ip);
- ioc3_alloc_rings(dev);
-
- ioc3_clean_rx_ring(ip);
- ioc3_clean_tx_ring(ip);
+ ip->rx_ci = 0;
+ ip->rx_pi = RX_BUFFS;
- /* Now the rx ring base, consume & produce registers. */
- ring = ioc3_map(ip->rxr, 0);
- ioc3_w_erbr_h(ring >> 32);
- ioc3_w_erbr_l(ring & 0xffffffff);
- ioc3_w_ercir(ip->rx_ci << 3);
- ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
-
- ring = ioc3_map(ip->txr, 0);
-
- ip->txqlen = 0; /* nothing queued */
-
- /* Now the tx ring base, consume & produce registers. */
- ioc3_w_etbr_h(ring >> 32);
- ioc3_w_etbr_l(ring & 0xffffffff);
- ioc3_w_etpir(ip->tx_pi << 7);
- ioc3_w_etcir(ip->tx_ci << 7);
- (void) ioc3_r_etcir(); /* Flush */
+ return 0;
}
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
- volatile u32 *ssram0 = &ioc3->ssram[0x0000];
- volatile u32 *ssram1 = &ioc3->ssram[0x4000];
- unsigned int pattern = 0x5555;
+ struct ioc3_ethregs *regs = ip->regs;
+ u32 *ssram0 = &ip->ssram[0x0000];
+ u32 *ssram1 = &ip->ssram[0x4000];
+ u32 pattern = 0x5555;
/* Assume the larger size SSRAM and enable parity checking */
- ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
+ writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
+ readl(&regs->emcr); /* Flush */
- *ssram0 = pattern;
- *ssram1 = ~pattern & IOC3_SSRAM_DM;
+ writel(pattern, ssram0);
+ writel(~pattern & IOC3_SSRAM_DM, ssram1);
- if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
- (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
+ if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
+ (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
/* set ssram size to 64 KB */
- ip->emcr = EMCR_RAMPAR;
- ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
- } else
- ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
+ ip->emcr |= EMCR_RAMPAR;
+ writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
+ } else {
+ ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
+ }
}
static void ioc3_init(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
del_timer_sync(&ip->ioc3_timer); /* Kill if running */
- ioc3_w_emcr(EMCR_RST); /* Reset */
- (void) ioc3_r_emcr(); /* Flush WB */
+ writel(EMCR_RST, &regs->emcr); /* Reset */
+ readl(&regs->emcr); /* Flush WB */
udelay(4); /* Give it time ... */
- ioc3_w_emcr(0);
- (void) ioc3_r_emcr();
+ writel(0, &regs->emcr);
+ readl(&regs->emcr);
/* Misc registers */
-#ifdef CONFIG_SGI_IP27
- ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
-#else
- ioc3_w_erbar(0); /* Let PCI API get it right */
-#endif
- (void) ioc3_r_etcdc(); /* Clear on read */
- ioc3_w_ercsr(15); /* RX low watermark */
- ioc3_w_ertr(0); /* Interrupt immediately */
+ writel(ERBAR_VAL, &regs->erbar);
+ readl(&regs->etcdc); /* Clear on read */
+ writel(15, &regs->ercsr); /* RX low watermark */
+ writel(0, &regs->ertr); /* Interrupt immediately */
__ioc3_set_mac_address(dev);
- ioc3_w_ehar_h(ip->ehar_h);
- ioc3_w_ehar_l(ip->ehar_l);
- ioc3_w_ersr(42); /* XXX should be random */
+ writel(ip->ehar_h, &regs->ehar_h);
+ writel(ip->ehar_l, &regs->ehar_l);
+ writel(42, &regs->ersr); /* XXX should be random */
+}
- ioc3_init_rings(dev);
+static void ioc3_start(struct ioc3_private *ip)
+{
+ struct ioc3_ethregs *regs = ip->regs;
+ unsigned long ring;
+
+ /* Now the rx ring base, consume & produce registers. */
+ ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
+ writel(ring >> 32, &regs->erbr_h);
+ writel(ring & 0xffffffff, &regs->erbr_l);
+ writel(ip->rx_ci << 3, &regs->ercir);
+ writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);
+
+ ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);
+
+ ip->txqlen = 0; /* nothing queued */
+
+ /* Now the tx ring base, consume & produce registers. */
+ writel(ring >> 32, &regs->etbr_h);
+ writel(ring & 0xffffffff, &regs->etbr_l);
+ writel(ip->tx_pi << 7, &regs->etpir);
+ writel(ip->tx_ci << 7, &regs->etcir);
+ readl(&regs->etcir); /* Flush */
ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
- EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
- ioc3_w_emcr(ip->emcr);
- ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
- EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
- EISR_TXEXPLICIT | EISR_TXMEMERR);
- (void) ioc3_r_eier();
+ EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
+ writel(ip->emcr, &regs->emcr);
+ writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
+ readl(&regs->eier);
}
static inline void ioc3_stop(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- ioc3_w_emcr(0); /* Shutup */
- ioc3_w_eier(0); /* Disable interrupts */
- (void) ioc3_r_eier(); /* Flush */
+ writel(0, &regs->emcr); /* Shutup */
+ writel(0, &regs->eier); /* Disable interrupts */
+ readl(&regs->eier); /* Flush */
}
static int ioc3_open(struct net_device *dev)
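
ioc3_start() above programs each ring's 64-bit DMA address through a pair of 32-bit registers. A sketch of that split using the kernel's upper_32_bits()/lower_32_bits() helpers rather than open-coded shifts (illustrative only; the patch writes the halves directly):

static void sketch_write_ring_base(u64 ring, u32 __iomem *hi, u32 __iomem *lo)
{
	writel(upper_32_bits(ring), hi);	/* bits 63..32 */
	writel(lower_32_bits(ring), lo);	/* bits 31..0  */
}
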
@@ -1038,14 +975,20 @@ static int ioc3_open(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
- printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ netdev_err(dev, "Can't get irq %d\n", dev->irq);
return -EAGAIN;
}
ip->ehar_h = 0;
ip->ehar_l = 0;
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+ ioc3_start(ip);
ioc3_mii_start(ip);
netif_start_queue(dev);
@@ -1063,12 +1006,13 @@ static int ioc3_close(struct net_device *dev)
ioc3_stop(ip);
free_irq(dev->irq, dev);
- ioc3_free_rings(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
return 0;
}
-/*
- * MENET cards have four IOC3 chips, which are attached to two sets of
+/* MENET cards have four IOC3 chips, which are attached to two sets of
* PCI slot resources each: the primary connections are on slots
* 0..3 and the secondaries are on 4..7
*
@@ -1085,7 +1029,7 @@ static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
if (dev) {
if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
+ dev->device == PCI_DEVICE_ID_SGI_IOC3)
ret = 1;
pci_dev_put(dev);
}
@@ -1095,15 +1039,14 @@ static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
static int ioc3_is_menet(struct pci_dev *pdev)
{
- return pdev->bus->parent == NULL &&
+ return !pdev->bus->parent &&
ioc3_adjacent_is_ioc3(pdev, 0) &&
ioc3_adjacent_is_ioc3(pdev, 1) &&
ioc3_adjacent_is_ioc3(pdev, 2);
}
#ifdef CONFIG_SERIAL_8250
-/*
- * Note about serial ports and consoles:
+/* Note about serial ports and consoles:
* For console output, everyone uses the IOC3 UARTA (offset 0x178)
* connected to the master node (look in ip27_setup_console() and
* ip27prom_console_write()).
@@ -1140,31 +1083,32 @@ static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
#define COSMISC_CONSTANT 6
struct uart_8250_port port = {
- .port = {
+ .port = {
.irq = 0,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = (22000000 << 1) / COSMISC_CONSTANT,
- .membase = (unsigned char __iomem *) uart,
- .mapbase = (unsigned long) uart,
- }
+ .membase = (unsigned char __iomem *)uart,
+ .mapbase = (unsigned long)uart,
+ }
};
unsigned char lcr;
- lcr = uart->iu_lcr;
- uart->iu_lcr = lcr | UART_LCR_DLAB;
- uart->iu_scr = COSMISC_CONSTANT,
- uart->iu_lcr = lcr;
- uart->iu_lcr;
+ lcr = readb(&uart->iu_lcr);
+ writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
+ writeb(COSMISC_CONSTANT, &uart->iu_scr);
+ writeb(lcr, &uart->iu_lcr);
+ readb(&uart->iu_lcr);
serial8250_register_8250_port(&port);
}
static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
- /*
- * We need to recognice and treat the fourth MENET serial as it
+ u32 sio_iec;
+
+ /* We need to recognize and treat the fourth MENET serial as it
* does not have an SuperIO chip attached to it, therefore attempting
* to access it will result in bus errors. We call something an
* MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
@@ -1175,33 +1119,34 @@ static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
return;
- /*
- * Switch IOC3 to PIO mode. It probably already was but let's be
+ /* Switch IOC3 to PIO mode. It probably already was but let's be
* paranoid
*/
- ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
- ioc3->gpcr_s;
- ioc3->gppr_6 = 0;
- ioc3->gppr_6;
- ioc3->gppr_7 = 0;
- ioc3->gppr_7;
- ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
- ioc3->sscr_a;
- ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
- ioc3->sscr_b;
+ writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
+ readl(&ioc3->gpcr_s);
+ writel(0, &ioc3->gppr[6]);
+ readl(&ioc3->gppr[6]);
+ writel(0, &ioc3->gppr[7]);
+ readl(&ioc3->gppr[7]);
+ writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
+ readl(&ioc3->port_a.sscr);
+ writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
+ readl(&ioc3->port_b.sscr);
/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
- ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
- SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
- SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
- SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
- ioc3->sio_iec |= SIO_IR_SA_INT;
- ioc3->sscr_a = 0;
- ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
- SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
- SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
- SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
- ioc3->sio_iec |= SIO_IR_SB_INT;
- ioc3->sscr_b = 0;
+ sio_iec = readl(&ioc3->sio_iec);
+ sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
+ SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
+ SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
+ SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
+ sio_iec |= SIO_IR_SA_INT;
+ sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
+ SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
+ SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
+ SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
+ sio_iec |= SIO_IR_SB_INT;
+ writel(sio_iec, &ioc3->sio_iec);
+ writel(0, &ioc3->port_a.sscr);
+ writel(0, &ioc3->port_b.sscr);
ioc3_8250_register(&ioc3->sregs.uarta);
ioc3_8250_register(&ioc3->sregs.uartb);
@@ -1236,15 +1181,15 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err < 0) {
- printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
- "for consistent allocations\n", pci_name(pdev));
+ pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
+ pci_name(pdev));
goto out;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR "%s: No usable DMA configuration, "
- "aborting.\n", pci_name(pdev));
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
goto out;
}
pci_using_dac = 0;
@@ -1270,19 +1215,22 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ip = netdev_priv(dev);
ip->dev = dev;
+ ip->dma_dev = &pdev->dev;
dev->irq = pdev->irq;
ioc3_base = pci_resource_start(pdev, 0);
ioc3_size = pci_resource_len(pdev, 0);
- ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
+ ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
if (!ioc3) {
- printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
+ pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
pci_name(pdev));
err = -ENOMEM;
goto out_res;
}
- ip->regs = ioc3;
+ ip->regs = &ioc3->eth;
+ ip->ssram = ioc3->ssram;
+ ip->all_regs = ioc3;
#ifdef CONFIG_SERIAL_8250
ioc3_serial_probe(pdev, ioc3);
@@ -1292,6 +1240,26 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
ioc3_stop(ip);
+
+ /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
+ ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
+ &ip->rxr_dma, GFP_ATOMIC, 0);
+ if (!ip->rxr) {
+ pr_err("ioc3-eth: rx ring allocation failed\n");
+ err = -ENOMEM;
+ goto out_stop;
+ }
+
+ /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
+ ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
+ &ip->txr_dma,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ if (!ip->txr) {
+ pr_err("ioc3-eth: tx ring allocation failed\n");
+ err = -ENOMEM;
+ goto out_stop;
+ }
+
ioc3_init(dev);
ip->pdev = pdev;
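
The probe hunk above allocates both descriptor rings with dma_direct_alloc_pages(), which returns the CPU pointer and fills in the device-visible address in one call; whole-page allocations also satisfy the 4 KiB / 16 KiB alignment the rx and tx rings need. A sketch of the allocate-and-check pattern (the wrapper name is illustrative):

static int sketch_alloc_ring(struct device *dma_dev, size_t size,
			     void **ring, dma_addr_t *ring_dma)
{
	/* zeroed, page-backed buffer plus its bus address for the hardware */
	*ring = dma_direct_alloc_pages(dma_dev, size, ring_dma,
				       GFP_KERNEL | __GFP_ZERO, 0);
	if (!*ring)
		return -ENOMEM;

	return 0;
}
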
@@ -1305,7 +1273,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_mii_init(ip);
if (ip->mii.phy_id == -1) {
- printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
+ pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
pci_name(pdev));
err = -ENODEV;
goto out_stop;
@@ -1335,24 +1303,27 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
model = (sw_physid2 >> 4) & 0x3f;
rev = sw_physid2 & 0xf;
- printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
- "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
- printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
- ip->emcr & EMCR_BUFSIZ ? 128 : 64);
+ netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
+ ip->mii.phy_id, vendor, model, rev);
+ netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
+ ip->emcr & EMCR_BUFSIZ ? 128 : 64);
return 0;
out_stop:
- ioc3_stop(ip);
del_timer_sync(&ip->ioc3_timer);
- ioc3_free_rings(ip);
+ if (ip->rxr)
+ dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma, 0);
+ if (ip->txr)
+ dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
+ ip->txr_dma, 0);
out_res:
pci_release_regions(pdev);
out_free:
free_netdev(dev);
out_disable:
- /*
- * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
out:
@@ -1363,16 +1334,19 @@ static void ioc3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+
+ dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma, 0);
+ dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
+ ip->txr_dma, 0);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
- iounmap(ioc3);
+ iounmap(ip->all_regs);
pci_release_regions(pdev);
free_netdev(dev);
- /*
- * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
}
@@ -1392,16 +1366,14 @@ static struct pci_driver ioc3_driver = {
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- unsigned int len;
struct ioc3_etxd *desc;
- uint32_t w0 = 0;
+ unsigned long data;
+ unsigned int len;
int produce;
+ u32 w0 = 0;
- /*
- * IOC3 has a fairly simple minded checksumming hardware which simply
+ /* IOC3 has fairly simple-minded checksumming hardware which simply
* adds up the 1's complement checksum for the entire packet and
* inserts it at an offset which can be specified in the descriptor
* into the transmit packet. This means we have to compensate for the
@@ -1412,25 +1384,23 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
const struct iphdr *ih = ip_hdr(skb);
const int proto = ntohs(ih->protocol);
unsigned int csoff;
- uint32_t csum, ehsum;
- uint16_t *eh;
+ u32 csum, ehsum;
+ u16 *eh;
/* The MAC header. skb->mac seem the logic approach
- to find the MAC header - except it's a NULL pointer ... */
- eh = (uint16_t *) skb->data;
+ * to find the MAC header - except it's a NULL pointer ...
+ */
+ eh = (u16 *)skb->data;
/* Sum up dest addr, src addr and protocol */
ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
- /* Fold ehsum. can't use csum_fold which negates also ... */
- ehsum = (ehsum & 0xffff) + (ehsum >> 16);
- ehsum = (ehsum & 0xffff) + (ehsum >> 16);
-
/* Skip IP header; it's sum is always zero and was
- already filled in by ip_output.c */
+ * already filled in by ip_output.c
+ */
csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
- ih->tot_len - (ih->ihl << 2),
- proto, 0xffff ^ ehsum);
+ ih->tot_len - (ih->ihl << 2),
+ proto, csum_fold(ehsum));
csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
csum = (csum & 0xffff) + (csum >> 16);
@@ -1450,7 +1420,7 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&ip->ioc3_lock);
- data = (unsigned long) skb->data;
+ data = (unsigned long)skb->data;
len = skb->len;
produce = ip->tx_pi;
@@ -1470,47 +1440,78 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long b2 = (data | 0x3fffUL) + 1UL;
unsigned long s1 = b2 - data;
unsigned long s2 = data + len - b2;
+ dma_addr_t d1, d2;
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
- ETXD_B1V | ETXD_B2V | w0);
+ ETXD_B1V | ETXD_B2V | w0);
desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
- (s2 << ETXD_B2CNT_SHIFT));
- desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
- desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
+ (s2 << ETXD_B2CNT_SHIFT));
+ d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d1))
+ goto drop_packet;
+ d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d2)) {
+ dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
+ goto drop_packet;
+ }
+ desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
+ desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
} else {
+ dma_addr_t d;
+
/* Normal sized packet that doesn't cross a page boundary. */
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
- desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d))
+ goto drop_packet;
+ desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
}
- BARRIER();
+ mb(); /* make sure all descriptor changes are visible */
ip->tx_skbs[produce] = skb; /* Remember skb */
- produce = (produce + 1) & 127;
+ produce = (produce + 1) & TX_RING_MASK;
ip->tx_pi = produce;
- ioc3_w_etpir(produce << 7); /* Fire ... */
+ writel(produce << 7, &ip->regs->etpir); /* Fire ... */
ip->txqlen++;
- if (ip->txqlen >= 127)
+ if (ip->txqlen >= (TX_RING_ENTRIES - 1))
netif_stop_queue(dev);
spin_unlock_irq(&ip->ioc3_lock);
return NETDEV_TX_OK;
+
+drop_packet:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return NETDEV_TX_OK;
}
static void ioc3_timeout(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
spin_lock_irq(&ip->ioc3_lock);
ioc3_stop(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ spin_unlock_irq(&ip->ioc3_lock);
+ return;
+ }
+ ioc3_start(ip);
ioc3_mii_init(ip);
ioc3_mii_start(ip);
@@ -1519,16 +1520,14 @@ static void ioc3_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-/*
- * Given a multicast ethernet address, this routine calculates the
+/* Given a multicast ethernet address, this routine calculates the
* address's bit index in the logical address filter mask
*/
-
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
unsigned int temp = 0;
- u32 crc;
int bits;
+ u32 crc;
crc = ether_crc_le(ETH_ALEN, addr);
@@ -1542,8 +1541,8 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
return temp;
}
-static void ioc3_get_drvinfo (struct net_device *dev,
- struct ethtool_drvinfo *info)
+static void ioc3_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct ioc3_private *ip = netdev_priv(dev);
@@ -1623,27 +1622,28 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
+ struct netdev_hw_addr *ha;
u64 ehar = 0;
- netif_stop_queue(dev); /* Lock out others. */
+ spin_lock_irq(&ip->ioc3_lock);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
ip->emcr |= EMCR_PROMISC;
- ioc3_w_emcr(ip->emcr);
- (void) ioc3_r_emcr();
+ writel(ip->emcr, &regs->emcr);
+ readl(&regs->emcr);
} else {
ip->emcr &= ~EMCR_PROMISC;
- ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
- (void) ioc3_r_emcr();
+ writel(ip->emcr, &regs->emcr); /* Clear promiscuous. */
+ readl(&regs->emcr);
if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
- multicast packets anyway, so skip computing all the
- hashes and just accept all packets. */
+ * multicast packets anyway, so skip computing all the
+ * hashes and just accept all packets.
+ */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
@@ -1653,11 +1653,11 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = ehar >> 32;
ip->ehar_l = ehar & 0xffffffff;
}
- ioc3_w_ehar_h(ip->ehar_h);
- ioc3_w_ehar_l(ip->ehar_l);
+ writel(ip->ehar_h, &regs->ehar_h);
+ writel(ip->ehar_l, &regs->ehar_l);
}
- netif_wake_queue(dev); /* Let us get going again. */
+ spin_unlock_irq(&ip->ioc3_lock);
}
module_pci_driver(ioc3_driver);
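A minimal sketch of the map/check/unmap pattern the ioc3_start_xmit() hunks above introduce, using hypothetical names (map_two_buffers, dev, buf1/buf2) rather than anything taken from the patch:

	/* Map two TX buffers with dma_map_single(), checking each mapping
	 * with dma_mapping_error() and unmapping what was already mapped
	 * before failing, as the two-buffer case above does.
	 */
	static int map_two_buffers(struct device *dev, void *buf1, size_t len1,
				   void *buf2, size_t len2,
				   dma_addr_t *d1, dma_addr_t *d2)
	{
		*d1 = dma_map_single(dev, buf1, len1, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *d1))
			return -ENOMEM;

		*d2 = dma_map_single(dev, buf2, len2, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *d2)) {
			dma_unmap_single(dev, *d1, len1, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		return 0;
	}

Note that dma_unmap_single() is called with the same length that was handed to dma_map_single(); the DMA API expects matching sizes on map and unmap.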
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 00660dd820e2..539bc5db989c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -247,8 +247,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
/* Remove any pending skb */
for (i = 0; i < TX_RING_ENTRIES; i++) {
- if (priv->tx_skbs[i])
- dev_kfree_skb(priv->tx_skbs[i]);
+ dev_kfree_skb(priv->tx_skbs[i]);
priv->tx_skbs[i] = NULL;
}
dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6e07f5ebacfc..85eaccbbbac1 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -191,6 +191,8 @@ struct sis900_private {
unsigned int tx_full; /* The Tx queue is full. */
u8 host_bridge_rev;
u8 chipset_rev;
+ /* EEPROM data */
+ int eeprom_size;
};
MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
@@ -475,6 +477,8 @@ static int sis900_probe(struct pci_dev *pci_dev,
sis_priv->pci_dev = pci_dev;
spin_lock_init(&sis_priv->lock);
+ sis_priv->eeprom_size = 24;
+
pci_set_drvdata(pci_dev, net_dev);
ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
@@ -2122,6 +2126,68 @@ static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *w
wol->supported = (WAKE_PHY | WAKE_MAGIC);
}
+static int sis900_get_eeprom_len(struct net_device *dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+
+ return sis_priv->eeprom_size;
+}
+
+static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ void __iomem *ioaddr = sis_priv->ioaddr;
+ int wait, ret = -EAGAIN;
+ u16 signature;
+ u16 *ebuf = (u16 *)buf;
+ int i;
+
+ if (sis_priv->chipset_rev == SIS96x_900_REV) {
+ sw32(mear, EEREQ);
+ for (wait = 0; wait < 2000; wait++) {
+ if (sr32(mear) & EEGNT) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ break;
+ }
+ udelay(1);
+ }
+ sw32(mear, EEDONE);
+ } else {
+ signature = (u16)read_eeprom(ioaddr, EEPROMSignature);
+ if (signature != 0xffff && signature != 0x0000) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+#define SIS900_EEPROM_MAGIC 0xBABE
+static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+ u8 *eebuf;
+ int res;
+
+ eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL);
+ if (!eebuf)
+ return -ENOMEM;
+
+ eeprom->magic = SIS900_EEPROM_MAGIC;
+ spin_lock_irq(&sis_priv->lock);
+ res = sis900_read_eeprom(dev, eebuf);
+ spin_unlock_irq(&sis_priv->lock);
+ if (!res)
+ memcpy(data, eebuf + eeprom->offset, eeprom->len);
+ kfree(eebuf);
+ return res;
+}
+
static const struct ethtool_ops sis900_ethtool_ops = {
.get_drvinfo = sis900_get_drvinfo,
.get_msglevel = sis900_get_msglevel,
@@ -2132,6 +2198,8 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.set_wol = sis900_set_wol,
.get_link_ksettings = sis900_get_link_ksettings,
.set_link_ksettings = sis900_set_link_ksettings,
+ .get_eeprom_len = sis900_get_eeprom_len,
+ .get_eeprom = sis900_get_eeprom,
};
/**
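For the sis900 ethtool additions above, get_eeprom_len() advertises the EEPROM size and get_eeprom() copies out the window described by eeprom->offset and eeprom->len; the ethtool core is expected to reject windows that fall outside get_eeprom_len() before the callback runs. A small illustrative check equivalent to that assumption (not part of the patch, names are made up):

	/* Assumed invariant for ->get_eeprom(): the requested window lies
	 * inside the size reported by ->get_eeprom_len() (24 bytes here).
	 */
	static bool eeprom_window_valid(u32 offset, u32 len, u32 eeprom_size)
	{
		return len && offset <= eeprom_size && len <= eeprom_size - offset;
	}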
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 601e76ad99a0..3a6761131f4c 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -378,8 +378,7 @@ static void smc_shutdown(struct net_device *dev)
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
spin_unlock_irq(&lp->lock);
- if (pending_skb)
- dev_kfree_skb(pending_skb);
+ dev_kfree_skb(pending_skb);
/* and tell the card to stay away from that nasty outside world */
SMC_SELECT_BANK(lp, 0);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 51a7b48db4bc..10d0c3e478ab 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1553,7 +1553,6 @@ static int ave_probe(struct platform_device *pdev)
struct ave_private *priv;
struct net_device *ndev;
struct device_node *np;
- struct resource *res;
const void *mac_addr;
void __iomem *base;
const char *name;
@@ -1573,13 +1572,10 @@ static int ave_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "IRQ not found\n");
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
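sni_ave above, and dwmac-anarion, dwc-qos-eth, meson and meson8b further down, all make the same conversion from platform_get_resource() plus devm_ioremap_resource() to the single devm_platform_ioremap_resource() helper. A minimal sketch of the conversion in an otherwise hypothetical probe():

	/* Old, open-coded form:
	 *	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	base = devm_ioremap_resource(&pdev->dev, res);
	 *
	 * New helper doing the same lookup and ioremap in one call:
	 */
	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... rest of probe ... */
		return 0;
	}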
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 2325b40dff6e..338e25a6374e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -200,6 +200,7 @@ endif
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
+ depends on COMMON_CLK
---help---
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ed872eed1cab..49aa56ca09cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -75,6 +75,7 @@ struct stmmac_extra_stats {
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
+ unsigned long rx_split_hdr_pkt_n;
/* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
@@ -354,6 +355,11 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int rssen;
+ unsigned int vlhash;
+ unsigned int sphen;
+ unsigned int vlins;
+ unsigned int dvlan;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -381,6 +387,16 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Receive Side Scaling */
+#define STMMAC_RSS_HASH_KEY_SIZE 40
+#define STMMAC_RSS_MAX_TABLE_SIZE 256
+
+/* VLAN */
+#define STMMAC_VLAN_NONE 0x0
+#define STMMAC_VLAN_REMOVE 0x1
+#define STMMAC_VLAN_INSERT 0x2
+#define STMMAC_VLAN_REPLACE 0x3
+
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
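The RSS and VLAN constants added to common.h pair with a struct stmmac_rss configuration consumed by dwxgmac2_rss_configure() later in this diff (cfg->enable, cfg->key, cfg->table). Its definition is not shown here; inferred from those users and the two sizes above, it is presumably along the lines of:

	struct stmmac_rss {
		int enable;
		u8 key[STMMAC_RSS_HASH_KEY_SIZE];	/* 40-byte hash key */
		u32 table[STMMAC_RSS_MAX_TABLE_SIZE];	/* 256-entry indirection table */
	};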
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 6ce3a7fb41ab..527f93320a5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -62,12 +62,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
int phy_mode;
- struct resource *res;
void __iomem *ctl_block;
struct anarion_gmac *gmac;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctl_block = devm_ioremap_resource(&pdev->dev, res);
+ ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
PTR_ERR(ctl_block));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 3a14cdd01f5f..dd9967aeda22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
+ /* MDIO bus was already reset just above */
+ data->mdio_bus_data->needs_reset = false;
+
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
err = PTR_ERR(eqos->rst);
@@ -415,7 +418,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
void *priv;
int ret;
@@ -428,17 +430,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
* resource initialization is done in the glue logic.
*/
stmmac_res.irq = platform_get_irq(pdev, 0);
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "IRQ configuration information not found\n");
-
+ if (stmmac_res.irq < 0)
return stmmac_res.irq;
- }
stmmac_res.wol_irq = stmmac_res.irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 88eb16954627..bbc16b5a410a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -46,7 +46,6 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -63,8 +62,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->reg)) {
ret = PTR_ERR(dwmac->reg);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 786ca4a7bf36..9cda29e4b89d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -308,7 +308,6 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
struct meson8b_dwmac *dwmac;
int ret;
@@ -332,8 +331,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->regs)) {
ret = PTR_ERR(dwmac->regs);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 3174b701aa90..7357b8bdc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -32,6 +32,9 @@
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT 12
+#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,6 +47,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
@@ -51,6 +55,19 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
+#define XGMAC_VLAN_TAG 0x00000050
+#define XGMAC_VLAN_EDVLP BIT(26)
+#define XGMAC_VLAN_VTHM BIT(25)
+#define XGMAC_VLAN_DOVLTC BIT(20)
+#define XGMAC_VLAN_ESVL BIT(18)
+#define XGMAC_VLAN_ETV BIT(16)
+#define XGMAC_VLAN_VID GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE 0x00000058
+#define XGMAC_VLAN_INCL 0x00000060
+#define XGMAC_VLAN_VLTI BIT(20)
+#define XGMAC_VLAN_CSVL BIT(19)
+#define XGMAC_VLAN_VLC GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -59,6 +76,7 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
#define XGMAC_TSIE BIT(12)
@@ -76,19 +94,34 @@
#define XGMAC_RWKPKTEN BIT(2)
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_LPI_CTRL 0x000000d0
+#define XGMAC_TXCGE BIT(21)
+#define XGMAC_LPITXA BIT(19)
+#define XGMAC_PLS BIT(17)
+#define XGMAC_LPITXEN BIT(16)
+#define XGMAC_RLPIEX BIT(3)
+#define XGMAC_RLPIEN BIT(2)
+#define XGMAC_TLPIEX BIT(1)
+#define XGMAC_TLPIEN BIT(0)
+#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_EEESEL BIT(13)
#define XGMAC_HWFEAT_TSSEL BIT(12)
#define XGMAC_HWFEAT_AVSEL BIT(11)
#define XGMAC_HWFEAT_RAVSEL BIT(10)
#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_SPHEN BIT(17)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
@@ -98,6 +131,16 @@
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN BIT(13)
+#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
+#define XGMAC_MAC_FSM_CONTROL 0x00000158
+#define XGMAC_PRTYEN BIT(1)
+#define XGMAC_TMOUTEN BIT(0)
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
@@ -108,14 +151,45 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_RSS_CTRL 0x00000c80
+#define XGMAC_UDP4TE BIT(3)
+#define XGMAC_TCP4TE BIT(2)
+#define XGMAC_IP2TE BIT(1)
+#define XGMAC_RSSE BIT(0)
+#define XGMAC_RSS_ADDR 0x00000c88
+#define XGMAC_RSSIA_SHIFT 8
+#define XGMAC_ADDRT BIT(2)
+#define XGMAC_CT BIT(1)
+#define XGMAC_OB BIT(0)
+#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+#define XGMAC_PPS_CONTROL 0x00000d70
+#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x) ((x) * 8)
+#define XGMAC_PPSx_MASK(x) \
+ GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val) \
+ GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+ ((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val) \
+ GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+ ((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START 0x2
+#define XGMAC_PPSCMD_STOP 0x5
+#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0 BIT(31)
+#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10)
/* MTL Registers */
#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_FRPE BIT(15)
#define XGMAC_ETSALG GENMASK(6, 5)
#define XGMAC_WRR (0x0 << 5)
#define XGMAC_WFQ (0x1 << 5)
@@ -124,8 +198,32 @@
#define XGMAC_MTL_INT_STATUS 0x00001020
#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
-#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_QDDMACH BIT(7)
+#define XGMAC_TC_PRTY_MAP0 0x00001040
+#define XGMAC_TC_PRTY_MAP1 0x00001044
+#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
+#define XGMAC_RXPI BIT(31)
+#define XGMAC_NPE GENMASK(23, 16)
+#define XGMAC_NVE GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
+#define XGMAC_STARTBUSY BIT(31)
+#define XGMAC_WRRDN BIT(16)
+#define XGMAC_ADDR GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
+#define XGMAC_MTL_ECC_CONTROL 0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
+#define XGMAC_MEUIS BIT(1)
+#define XGMAC_MECIS BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
+#define XGMAC_RPCEIE BIT(12)
+#define XGMAC_ECEIE BIT(8)
+#define XGMAC_RXCEIE BIT(4)
+#define XGMAC_TXCEIE BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -164,6 +262,7 @@
#define XGMAC_RXOVFIS BIT(16)
#define XGMAC_ABPSIS BIT(1)
#define XGMAC_TXUNFIS BIT(0)
+#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4)
/* DMA Registers */
#define XGMAC_DMA_MODE 0x00003000
@@ -190,7 +289,18 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
+#define XGMAC_MCSIS BIT(31)
+#define XGMAC_MSUIS BIT(29)
+#define XGMAC_MSCIS BIT(28)
+#define XGMAC_DEUIS BIT(1)
+#define XGMAC_DECIS BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
+#define XGMAC_DCEIE BIT(1)
+#define XGMAC_TCEIE BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_TxPBL GENMASK(21, 16)
@@ -230,12 +340,17 @@
#define XGMAC_TBU BIT(2)
#define XGMAC_TPS BIT(1)
#define XGMAC_TI BIT(0)
+#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES2_IVT GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_VTIR GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
@@ -244,18 +359,33 @@
#define XGMAC_TDES3_CPC GENMASK(27, 26)
#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_SAIC GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV BIT(16)
+#define XGMAC_TDES3_VT GENMASK(15, 0)
#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES2_HL GENMASK(9, 0)
#define XGMAC_RDES3_OWN BIT(31)
#define XGMAC_RDES3_CTXT BIT(30)
#define XGMAC_RDES3_IOC BIT(30)
#define XGMAC_RDES3_LD BIT(28)
#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_RSV BIT(26)
+#define XGMAC_RDES3_L34T GENMASK(23, 20)
+#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_L34T_IP4TCP 0x1
+#define XGMAC_L34T_IP4UDP 0x2
+#define XGMAC_L34T_IP6TCP 0x9
+#define XGMAC_L34T_IP6UDP 0xA
#define XGMAC_RDES3_ES BIT(15)
#define XGMAC_RDES3_PL GENMASK(13, 0)
#define XGMAC_RDES3_TSD BIT(6)
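The per-output PPS macros above carve one byte of XGMAC_PPS_CONTROL per flexible PPS instance. Expanding them for output 0, as an arithmetic check rather than new functionality:

	/* Worked expansion for PPS output 0:
	 *	XGMAC_PPS_MAXIDX(0)                  = 7
	 *	XGMAC_PPS_MINIDX(0)                  = 0
	 *	XGMAC_PPSx_MASK(0)                   = GENMASK(7, 0)  = 0x000000ff
	 *	XGMAC_PPSCMDx(0, XGMAC_PPSCMD_START) = GENMASK(3, 0) & (0x2 << 0) = 0x2
	 *	XGMAC_TRGTMODSELx(0, 0x2)            = GENMASK(6, 5) & (0x2 << 5) = 0x40
	 * i.e. each output owns one byte of the register: bits 3:0 carry the
	 * command and bits 6:5 the target mode select for that instance.
	 */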
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 85c68b7ee8c6..e534a3aaf4a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -6,7 +6,9 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_ptp.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -118,6 +120,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSTC(queue);
+ value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
+
+ writel(value, ioaddr + reg);
+}
+
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
@@ -144,7 +163,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
u32 tx_alg)
{
void __iomem *ioaddr = hw->pcsr;
+ bool ets = true;
u32 value;
+ int i;
value = readl(ioaddr + XGMAC_MTL_OPMODE);
value &= ~XGMAC_ETSALG;
@@ -160,10 +181,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
value |= XGMAC_DWRR;
break;
default:
+ ets = false;
break;
}
writel(value, ioaddr + XGMAC_MTL_OPMODE);
+
+ /* Set ETS if desired */
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ value &= ~XGMAC_TSA;
+ if (ets)
+ value |= XGMAC_ETS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ }
+}
+
+static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
@@ -200,11 +239,21 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
+static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
u32 stat, en;
+ int ret = 0;
en = readl(ioaddr + XGMAC_INT_EN);
stat = readl(ioaddr + XGMAC_INT_STATUS);
@@ -216,7 +265,24 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
readl(ioaddr + XGMAC_PMT);
}
- return 0;
+ if (stat & XGMAC_LPIIS) {
+ u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ if (lpi & XGMAC_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (lpi & XGMAC_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ return ret;
}
static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
@@ -309,6 +375,53 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ value |= XGMAC_LPITXEN | XGMAC_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= XGMAC_TXCGE;
+
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ if (link)
+ value |= XGMAC_PLS;
+ else
+ value &= ~XGMAC_PLS;
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+ writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
+}
+
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
int mcbitslog2)
{
@@ -402,36 +515,694 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
+ u32 val)
+{
+ u32 ctrl = 0;
+
+ writel(val, ioaddr + XGMAC_RSS_DATA);
+ ctrl |= idx << XGMAC_RSSIA_SHIFT;
+ ctrl |= is_key ? XGMAC_ADDRT : 0x0;
+ ctrl |= XGMAC_OB;
+ writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
+
+ return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
+ !(ctrl & XGMAC_OB), 100, 10000);
+}
+
+static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 *key = (u32 *)cfg->key;
+ int i, ret;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RSS_CTRL);
+ if (!cfg->enable) {
+ value &= ~XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+ }
+
+ for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_rxq; i++)
+ dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
+
+ value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
+ value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
+ value &= ~XGMAC_VLAN_DOVLTC;
+ value &= ~XGMAC_VLAN_VID;
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ }
+}
+
+struct dwxgmac3_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name,
+ const struct dwxgmac3_error_desc *desc,
+ unsigned long field_offset,
+ struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
+ { true, "MTPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
+ { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
+ { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { true, "CPI", "Control Register Parity Check Error" },
+};
+
+static void dwxgmac3_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MAC",
+ dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MTL",
+ dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "DCES", "DMA DCACHE Memory Error" },
+ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
+ { true, "DUES", "DMA DCACHE Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+}
+
+static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+{
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ /* 1. Enable Safety Features */
+ writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+ value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
+ value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
+ value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+ value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
+ value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+
+ /* Only ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
+ value |= XGMAC_PRTYEN; /* FSM Parity Feature */
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
+ return 0;
+}
+
+static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr,
+ unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
+ corr = false;
+ if (err) {
+ dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
+ (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
+ corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
+ if (err) {
+ dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
+
+static const struct dwxgmac3_error {
+ const struct dwxgmac3_error_desc *desc;
+} dwxgmac3_all_errors[] = {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
+};
+
+static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count,
+ const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
+ return -EINVAL;
+ if (!dwxgmac3_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwxgmac3_all_errors[module].desc[offset].desc;
+ return 0;
+}
+
+static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
+
+ val &= ~XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+
+ return 0;
+}
+
+static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + XGMAC_MTL_OPMODE);
+ val |= XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & XGMAC_ADDR;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Write OP */
+ val |= XGMAC_WRRDN;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Start Write */
+ val |= XGMAC_STARTBUSY;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct stmmac_tc_entry *
+dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
+ unsigned int count, u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries by reverse order */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update all pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
- .tx_queue_prio = NULL,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
- .set_mtl_tx_queue_weight = NULL,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dump_regs,
.host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
.flow_ctrl = dwxgmac2_flow_ctrl,
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = NULL,
- .reset_eee_mode = NULL,
- .set_eee_timer = NULL,
- .set_eee_pls = NULL,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
.pcs_ctrl_ane = NULL,
.pcs_rane = NULL,
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
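dwxgmac2_rss_write_reg() above uses the usual indirect-register idiom: write the payload into a data register, then write the index plus a self-clearing OB ("operation busy") bit into the address register and poll with readl_poll_timeout() until hardware clears it. A generic sketch of the idiom, with made-up XYZ_* register names:

	/* Generic indirect write: XYZ_DATA holds the payload, XYZ_ADDR carries
	 * the index and a self-clearing OWN/busy bit.
	 */
	static int indirect_write(void __iomem *ioaddr, u32 index, u32 data)
	{
		u32 ctrl;

		writel(data, ioaddr + XYZ_DATA);
		writel((index << XYZ_INDEX_SHIFT) | XYZ_OWN, ioaddr + XYZ_ADDR);

		/* Poll until hardware drops the OWN bit or we time out. */
		return readl_poll_timeout(ioaddr + XYZ_ADDR, ctrl,
					  !(ctrl & XYZ_OWN), 100, 10000);
	}

readl_poll_timeout() may sleep between polls, so the caller must not hold a spinlock; the busy-waiting variant readl_poll_timeout_atomic(), used by dwxgmac2_get_mac_tx_timestamp() above, is the alternative for atomic context.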
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c4c45402b8f8..ae48154f933c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -26,16 +26,17 @@ static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
- int ret = good_frame;
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
return discard_frame;
- if (unlikely(rdes3 & XGMAC_RDES3_ES))
- ret = discard_frame;
- return ret;
+ return good_frame;
}
static int dwxgmac2_get_tx_len(struct dma_desc *p)
@@ -55,7 +56,7 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
@@ -98,11 +99,17 @@ static int dwxgmac2_rx_check_timestamp(void *desc)
unsigned int rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
+ dma_rmb();
+
desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
- if (likely(desc_valid && ts_valid))
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
return 0;
+ }
+
return -EINVAL;
}
@@ -113,13 +120,10 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
unsigned int rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
- if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
ret = dwxgmac2_rx_check_timestamp(next_desc);
- if (ret)
- return ret;
- }
- return ret;
+ return !ret;
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -144,7 +148,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
- tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
if (is_fs)
tdes3 |= XGMAC_TDES3_FD;
else
@@ -254,6 +258,86 @@ static void dwxgmac2_clear(struct dma_desc *p)
p->des3 = 0;
}
+static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 ptype;
+
+ if (rdes3 & XGMAC_RDES3_RSV) {
+ ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+
+ switch (ptype) {
+ case XGMAC_L34T_IP4TCP:
+ case XGMAC_L34T_IP4UDP:
+ case XGMAC_L34T_IP6TCP:
+ case XGMAC_L34T_IP6UDP:
+ *type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ *type = PKT_HASH_TYPE_L3;
+ break;
+ }
+
+ *hash = le32_to_cpu(p->des1);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ return 0;
+}
+
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+}
+
+static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+
+ des &= XGMAC_TDES2_IVT;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
+ des &= XGMAC_TDES3_IVTIR;
+ p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
+}
+
+static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= XGMAC_TDES2_VTIR_SHIFT;
+ p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -277,4 +361,10 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
+ .get_rx_hash = dwxgmac2_get_rx_hash,
+ .get_rx_header_len = dwxgmac2_get_rx_header_len,
+ .set_sec_addr = dwxgmac2_set_sec_addr,
+ .set_sarc = dwxgmac2_set_sarc,
+ .set_vlan_tag = dwxgmac2_set_vlan_tag,
+ .set_vlan = dwxgmac2_set_vlan,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index a4f236e3593e..64956465c030 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -128,6 +128,14 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
+static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
@@ -351,18 +359,24 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -396,6 +410,14 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
dma_cap->number_rx_queues =
((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+
+ /* MAC HW feature 3 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
+ dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
+ dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
+ dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
@@ -462,6 +484,22 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
+static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_HDSMS;
+ value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ if (en)
+ value |= XGMAC_SPH;
+ else
+ value &= ~XGMAC_SPH;
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -469,7 +507,7 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.init_rx_chan = dwxgmac2_dma_init_rx_chan,
.init_tx_chan = dwxgmac2_dma_init_tx_chan,
.axi = dwxgmac2_dma_axi,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dma_dump_regs,
.dma_rx_mode = dwxgmac2_dma_rx_mode,
.dma_tx_mode = dwxgmac2_dma_tx_mode,
.enable_dma_irq = dwxgmac2_enable_dma_irq,
@@ -488,4 +526,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.enable_tso = dwxgmac2_enable_tso,
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
+ .enable_sph = dwxgmac2_enable_sph,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 6c61b753b55e..3af2e5015245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.min_id = DWXGMAC_CORE_2_10,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
- .mmc_off = 0,
+ .mmc_off = MMC_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
- .mmc = NULL,
+ .mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 278c0dbec9d9..9435b312495d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -86,6 +86,15 @@ struct stmmac_desc_ops {
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
void (*clear)(struct dma_desc *p);
+ /* RSS */
+ int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type);
+ int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
+ void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type);
+ void (*set_vlan)(struct dma_desc *p, u32 type);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -136,6 +145,18 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, clear, __args)
+#define stmmac_get_rx_hash(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_hash, __args)
+#define stmmac_get_rx_header_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+#define stmmac_set_desc_sec_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
+#define stmmac_set_desc_sarc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sarc, __args)
+#define stmmac_set_desc_vlan_tag(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
+#define stmmac_set_desc_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -186,6 +207,7 @@ struct stmmac_dma_ops {
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
+ void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -242,6 +264,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+#define stmmac_enable_sph(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_sph, __args)
struct mac_device_info;
struct net_device;
@@ -249,6 +273,7 @@ struct rgmii_adv;
struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
+struct stmmac_rss;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -327,6 +352,17 @@ struct stmmac_ops {
u32 sub_second_inc, u32 systime_flags);
/* Loopback for selftests */
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
+ /* RSS */
+ int (*rss_configure)(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ /* TX Timestamp */
+ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
+ /* Source Address Insertion / Replacement */
+ void (*sarc_configure)(void __iomem *ioaddr, int val);
};
#define stmmac_core_init(__priv, __args...) \
@@ -397,6 +433,16 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
#define stmmac_set_mac_loopback(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
+#define stmmac_rss_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rss_configure, __args)
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
+#define stmmac_sarc_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -503,6 +549,7 @@ extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 3587ceb9faf5..a0c05925883e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -24,6 +24,7 @@
#define MMC_GMAC4_OFFSET 0x700
#define MMC_GMAC3_X_OFFSET 0x100
+#define MMC_XGMAC_OFFSET 0x800
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
@@ -116,6 +117,14 @@ struct stmmac_counters {
unsigned int mmc_rx_tcp_err_octets;
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+
+ /* FPE */
+ unsigned int mmc_tx_fpe_fragment_cntr;
+ unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_rx_packet_assembly_err_cntr;
+ unsigned int mmc_rx_packet_smd_err_cntr;
+ unsigned int mmc_rx_packet_assembly_ok_cntr;
+ unsigned int mmc_rx_fpe_fragment_cntr;
};
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a471db6d7b11..a223584f5f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,64 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_TX_OCTET_GB 0x14
+#define MMC_XGMAC_TX_PKT_GB 0x1c
+#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
+#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
+#define MMC_XGMAC_TX_64OCT_GB 0x34
+#define MMC_XGMAC_TX_65OCT_GB 0x3c
+#define MMC_XGMAC_TX_128OCT_GB 0x44
+#define MMC_XGMAC_TX_256OCT_GB 0x4c
+#define MMC_XGMAC_TX_512OCT_GB 0x54
+#define MMC_XGMAC_TX_1024OCT_GB 0x5c
+#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
+#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
+#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
+#define MMC_XGMAC_TX_UNDER 0x7c
+#define MMC_XGMAC_TX_OCTET_G 0x84
+#define MMC_XGMAC_TX_PKT_G 0x8c
+#define MMC_XGMAC_TX_PAUSE 0x94
+#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
+#define MMC_XGMAC_TX_LPI_USEC 0xa4
+#define MMC_XGMAC_TX_LPI_TRAN 0xa8
+
+#define MMC_XGMAC_RX_PKT_GB 0x100
+#define MMC_XGMAC_RX_OCTET_GB 0x108
+#define MMC_XGMAC_RX_OCTET_G 0x110
+#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
+#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
+#define MMC_XGMAC_RX_CRC_ERR 0x128
+#define MMC_XGMAC_RX_RUNT_ERR 0x130
+#define MMC_XGMAC_RX_JABBER_ERR 0x134
+#define MMC_XGMAC_RX_UNDER 0x138
+#define MMC_XGMAC_RX_OVER 0x13c
+#define MMC_XGMAC_RX_64OCT_GB 0x140
+#define MMC_XGMAC_RX_65OCT_GB 0x148
+#define MMC_XGMAC_RX_128OCT_GB 0x150
+#define MMC_XGMAC_RX_256OCT_GB 0x158
+#define MMC_XGMAC_RX_512OCT_GB 0x160
+#define MMC_XGMAC_RX_1024OCT_GB 0x168
+#define MMC_XGMAC_RX_UNI_PKT_G 0x170
+#define MMC_XGMAC_RX_LENGTH_ERR 0x178
+#define MMC_XGMAC_RX_RANGE 0x180
+#define MMC_XGMAC_RX_PAUSE 0x188
+#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
+#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
+#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
+#define MMC_XGMAC_RX_LPI_USEC 0x1a4
+#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
+#define MMC_XGMAC_TX_FPE_FRAG 0x208
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
.intr_all_mask = dwmac_mmc_intr_all_mask,
.read = dwmac_mmc_read,
};
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+}
+
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+	tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
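The XGMAC MMC counters added above are 64 bits wide, exposed as two consecutive 32-bit registers, and the MMC block clears them on read; dwxgmac_read_mmc_reg() therefore folds both halves into the existing 32-bit software counter and saturates it on overflow. A minimal stand-alone sketch of that accumulation, with the MMIO reads replaced by plain arguments (user-space C, names illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Fold the low and high 32-bit halves of a 64-bit MMC counter into a
 * 32-bit software counter, saturating on overflow. This mirrors
 * dwxgmac_read_mmc_reg() above with the register reads stubbed out. */
static void accumulate_counter(uint32_t lo, uint32_t hi, uint32_t *dest)
{
	uint64_t tmp = (uint64_t)lo | ((uint64_t)hi << 32);

	if (tmp > 0xffffffffULL)	/* does not fit the 32-bit counter */
		*dest = ~0U;
	else
		*dest += (uint32_t)tmp;
}

int main(void)
{
	uint32_t rx_octets = 0;

	accumulate_counter(1500, 0, &rx_octets);	/* normal case */
	accumulate_counter(16, 1, &rx_octets);		/* high half set: saturates */
	printf("rx_octets = %u\n", rx_octets);
	return 0;
}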
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5cd966c154f3..dcb2e29a5717 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
+#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
@@ -57,7 +58,9 @@ struct stmmac_tx_queue {
struct stmmac_rx_buffer {
struct page *page;
+ struct page *sec_page;
dma_addr_t addr;
+ dma_addr_t sec_addr;
};
struct stmmac_rx_queue {
@@ -73,6 +76,12 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
};
struct stmmac_channel {
@@ -113,6 +122,12 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
@@ -123,6 +138,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
+ int sph;
+ u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
@@ -185,11 +202,10 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
- struct dentry *dbgfs_rings_status;
- struct dentry *dbgfs_dma_cap;
#endif
unsigned long state;
@@ -203,6 +219,9 @@ struct stmmac_priv {
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+
+ /* Receive Side Scaling */
+ struct stmmac_rss rss;
};
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6efb66820d4c..1c450105e5a6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -18,10 +18,12 @@
#include "stmmac.h"
#include "dwmac_dma.h"
+#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define XGMAC_ETHTOOL_NAME "st_xgmac"
#define ETHTOOL_DMA_OFFSET 55
@@ -65,6 +67,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(rx_split_hdr_pkt_n),
/* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
@@ -243,6 +246,12 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
+ STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
@@ -253,6 +262,8 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
if (priv->plat->has_gmac || priv->plat->has_gmac4)
strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else if (priv->plat->has_xgmac)
+ strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
@@ -398,23 +409,28 @@ static int stmmac_check_if_running(struct net_device *dev)
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_xgmac)
+ return XGMAC_REGSIZE * 4;
return REG_SPACE_SIZE;
}
static void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
- u32 *reg_space = (u32 *) space;
-
struct stmmac_priv *priv = netdev_priv(dev);
-
- memset(reg_space, 0x0, REG_SPACE_SIZE);
+ u32 *reg_space = (u32 *) space;
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- /* Copy DMA registers to where ethtool expects them */
- memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
- NUM_DWMAC1000_DMA_REGS * 4);
+
+ if (!priv->plat->has_xgmac) {
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
+ }
}
static int stmmac_nway_reset(struct net_device *dev)
@@ -758,6 +774,76 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->plat->rx_queues_to_use;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return sizeof(priv->rss.key);
+}
+
+static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return ARRAY_SIZE(priv->rss.table);
+}
+
+static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ indir[i] = priv->rss.table[i];
+ }
+
+ if (key)
+ memcpy(key, priv->rss.key, sizeof(priv->rss.key));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = indir[i];
+ }
+
+ if (key)
+ memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+
+ return stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -849,6 +935,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
+ .get_rxnfc = stmmac_get_rxnfc,
+ .get_rxfh_key_size = stmmac_get_rxfh_key_size,
+ .get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
+ .get_rxfh = stmmac_get_rxfh,
+ .set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd54c7c87485..06ccd216ae90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -432,6 +432,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -443,9 +444,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* check tx tstamp status */
if (stmmac_get_tx_timestamp_status(priv, p)) {
- /* get the valid tstamp */
stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+ if (found) {
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -453,8 +458,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* pass tstamp to stack */
skb_tstamp_tx(skb, &shhwtstamp);
}
-
- return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -1198,6 +1201,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
if (!buf->page)
return -ENOMEM;
+ if (priv->sph) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+ } else {
+ buf->sec_page = NULL;
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
@@ -1220,6 +1234,10 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
if (buf->page)
page_pool_put_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
}
/**
@@ -2417,6 +2435,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
}
}
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
@@ -2461,6 +2495,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
/* Set RX routing */
if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
@@ -2573,6 +2611,16 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Enable Split Header */
+ if (priv->sph && priv->hw->rx_csum) {
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
+ }
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2750,6 +2798,33 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ p = tx_q->dma_tx + tx_q->cur_tx;
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ return true;
+}
+
/**
 * stmmac_tso_allocator - Allocate and fill TX descriptors for the TSO payload
* @priv: driver private structure
@@ -2829,12 +2904,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
dma_addr_t des;
+ bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2876,12 +2952,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -2960,6 +3042,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
@@ -3038,6 +3123,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry;
unsigned int enh_desc;
dma_addr_t des;
+ bool has_vlan;
int entry;
tx_q = &priv->tx_queue[queue];
@@ -3063,6 +3149,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3076,6 +3165,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
@@ -3173,6 +3265,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
@@ -3292,6 +3387,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+
+ dma_sync_single_for_device(priv->device, buf->sec_addr,
+ len, DMA_FROM_DEVICE);
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
/* Sync whole allocation to device. This will invalidate old
@@ -3301,6 +3407,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
DMA_FROM_DEVICE);
stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
stmmac_refill_desc3(priv, rx_q, p);
rx_q->rx_count_frames++;
@@ -3330,9 +3437,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
unsigned int next_entry = rx_q->cur_rx;
- int coe = priv->hw->rx_csum;
- unsigned int count = 0;
+ struct sk_buff *skb = NULL;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3346,10 +3454,30 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ unsigned int hlen = 0, prev_len = 0;
+ enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- int entry, status;
+ unsigned int sec_len;
+ int entry;
+ u32 hash;
+
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+ if (count >= limit)
+ break;
+
+read_again:
+ sec_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3376,6 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
np = rx_q->dma_rx + next_entry;
prefetch(np);
+ prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3384,26 +3513,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
buf->page = NULL;
- } else {
- struct sk_buff *skb;
- int frame_len;
- unsigned int des;
+ error = 1;
+ }
- stmmac_get_desc_addr(priv, p, &des);
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
- /* If frame length is greater than skb buffer size
- * (preallocated during init) then the packet is
- * ignored
- */
- if (frame_len > priv->dma_buf_sz) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
- priv->dev->stats.rx_length_errors++;
- continue;
- }
+ /* Buffer is good. Go on. */
+
+ if (likely(status & rx_not_ls)) {
+ len += priv->dma_buf_sz;
+ } else {
+ prev_len = len;
+ len = stmmac_get_rx_frame_len(priv, p, coe);
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
@@ -3414,53 +3540,97 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
*/
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))
- frame_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ if (!skb) {
+ int ret = stmmac_get_rx_header_len(priv, p, &hlen);
- if (netif_msg_rx_status(priv)) {
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, des);
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ if (priv->sph && !ret && (hlen > 0)) {
+ sec_len = len;
+ if (!(status & rx_not_ls))
+ sec_len = sec_len - hlen;
+ len = hlen;
+
+ prefetch(page_address(buf->sec_page));
+ priv->xstats.rx_split_hdr_pkt_n++;
}
- skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
- if (unlikely(!skb)) {
+ skb = napi_alloc_skb(&ch->rx_napi, len);
+ if (!skb) {
priv->dev->stats.rx_dropped++;
continue;
}
- dma_sync_single_for_cpu(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- frame_len);
- skb_put(skb, frame_len);
-
- if (netif_msg_pktdata(priv)) {
- netdev_dbg(priv->dev, "frame received (%dbytes)",
- frame_len);
- print_pkt(skb->data, frame_len);
- }
+ len);
+ skb_put(skb, len);
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else {
+ unsigned int buf_len = len - prev_len;
- stmmac_rx_vlan(priv->dev, skb);
+ if (likely(status & rx_not_ls))
+ buf_len = priv->dma_buf_sz;
- skb->protocol = eth_type_trans(skb, priv->dev);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, 0, buf_len,
+ priv->dma_buf_sz);
- if (unlikely(!coe))
- skb_checksum_none_assert(skb);
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
- napi_gro_receive(&ch->rx_napi, skb);
+ if (sec_len > 0) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ sec_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, sec_len,
+ priv->dma_buf_sz);
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
+ len += sec_len;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += frame_len;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
}
+
+ if (likely(status & rx_not_ls))
+ goto read_again;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
}
stmmac_rx_refill(priv, queue);
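With the rework above, stmmac_rx() can stop in the middle of a multi-descriptor frame when the NAPI budget runs out and resume it on the next poll by parking skb, error and len in rx_q->state. A minimal user-space model of that save/resume pattern is sketched below; descriptor and frame sizes are made up for illustration and none of these names come from the driver:

#include <stdbool.h>
#include <stdio.h>

struct rx_queue_state {
	bool saved;
	int len;
};

/* Consume up to 'budget' descriptors; a frame completes once its
 * accumulated length reaches frame_len. Partial progress is parked in
 * 'st' and picked up again on the next call, like rx_q->state above. */
static int poll_once(struct rx_queue_state *st, int *next_desc, int ndesc,
		     int desc_size, int frame_len, int budget)
{
	int count = 0, len = 0;

	if (st->saved) {
		len = st->len;
		st->saved = false;
	}

	while (count < budget && *next_desc < ndesc) {
		len += desc_size;
		(*next_desc)++;
		count++;

		if (len >= frame_len) {
			printf("frame completed, len=%d\n", len);
			len = 0;
		}
	}

	if (len) {			/* budget ran out mid-frame */
		st->saved = true;
		st->len = len;
	}
	return count;
}

int main(void)
{
	struct rx_queue_state st = { 0 };
	int next = 0;

	/* 6 descriptors of 2048B forming 4096B frames, budget of 1 per poll */
	while (next < 6)
		poll_once(&st, &next, 6, 2048, 4096, 1);
	return 0;
}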
@@ -3606,6 +3776,8 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ bool sph_en;
+ u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -3617,6 +3789,10 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
return 0;
}
@@ -3962,54 +4138,102 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
-static int stmmac_init_fs(struct net_device *dev)
+static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
- return -ENOMEM;
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
}
- /* Entry to report DMA RX/TX rings */
- priv->dbgfs_rings_status =
- debugfs_create_file("descriptors_status", 0444,
- priv->dbgfs_dir, dev,
- &stmmac_rings_status_fops);
+ return crc;
+}
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ u16 vid;
- return -ENOMEM;
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
}
- /* Entry to report the DMA HW features */
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
- priv->dbgfs_dir,
- dev, &stmmac_dma_cap_fops);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+}
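stmmac_vlan_update() condenses the set of active VLAN IDs into a 16-bit hash: each VID goes through a bit-wise little-endian CRC-32, the complemented CRC is bit-reversed, and its top four bits select one of 16 hash-filter bits, so distinct VIDs may share a bit (hence the false-positive note in the selftests below). A self-contained user-space sketch of that mapping, assuming a little-endian host and reimplementing the kernel helpers locally for illustration:

#include <stdint.h>
#include <stdio.h>

/* Bit-wise little-endian CRC-32 over the 12 VID bits, as in
 * stmmac_vid_crc32_le() above (host assumed little-endian, so the
 * cpu_to_le16() step is a no-op). */
static uint32_t vid_crc32_le(uint16_t vid_le)
{
	uint8_t *data = (uint8_t *)&vid_le;
	uint32_t crc = ~0U;
	int i;

	for (i = 0; i < 12; i++) {
		uint8_t bit = (data[i / 8] >> (i % 8)) & 1;

		if ((crc ^ bit) & 1)
			crc = (crc >> 1) ^ 0xedb88320;
		else
			crc >>= 1;
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1U) << (31 - i);
	return r;
}

int main(void)
{
	uint16_t vids[] = { 0x123, 0x124 };
	uint16_t hash = 0;
	unsigned int i;

	for (i = 0; i < sizeof(vids) / sizeof(vids[0]); i++) {
		uint32_t bit = bitrev32(~vid_crc32_le(vids[i])) >> 28;

		hash |= 1U << bit;
		printf("vid 0x%03x -> hash bit %u\n", vids[i], bit);
	}
	printf("VLAN hash filter value: 0x%04x\n", hash);
	return 0;
}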
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
- return -ENOMEM;
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
}
- return 0;
+ return ret;
}
-static void stmmac_exit_fs(struct net_device *dev)
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+ return stmmac_vlan_update(priv, is_double);
}
-#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -4027,6 +4251,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_poll_controller = stmmac_poll_controller,
#endif
.ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4175,8 +4401,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, maxq;
- int ret = 0;
+ u32 queue, rxq, maxq;
+ int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
@@ -4259,6 +4485,12 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph = true;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -4281,9 +4513,27 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
@@ -4368,10 +4618,7 @@ int stmmac_dvr_probe(struct device *device,
}
#ifdef CONFIG_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
- __func__);
+ stmmac_init_fs(ndev);
#endif
return ret;
@@ -4617,16 +4864,8 @@ static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
/* Create debugfs main directory if it doesn't exist yet */
- if (!stmmac_fs_dir) {
+ if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
-
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
- pr_err("ERROR %s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME);
-
- return -ENOMEM;
- }
- }
#endif
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 4304c1abc5d1..40c42637ad75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- new_bus->reset = &stmmac_mdio_reset;
+ if (mdio_bus_data->needs_reset)
+ new_bus->reset = &stmmac_mdio_reset;
+
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 86f9c07a38cf..20906287b6d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -9,6 +9,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
@@ -63,6 +64,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
+ plat->mdio_bus_data->needs_reset = true;
plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
@@ -107,6 +109,166 @@ static const struct stmmac_pci_info stmmac_pci_info = {
.setup = stmmac_default_data,
};
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+ }
+
+ /* FIFO size is 4096 bytes for 1 tx/rx queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->mdio_bus_data->phy_mask = 0;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ "stmmac-clk", NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ return 0;
+}
+
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_RGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tgl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+ .setup = tgl_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -292,10 +454,15 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
+ if (priv->plat->stmmac_clk)
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -348,6 +515,9 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
+#define STMMAC_EHL_RGMII1G_ID 0x4b30
+#define STMMAC_EHL_SGMII1G_ID 0x4b31
+#define STMMAC_TGL_SGMII1G_ID 0xa0ac
#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
PCI_VDEVICE(vendor_id, dev_id), \
@@ -358,6 +528,9 @@ static const struct pci_device_id stmmac_id_table[] = {
STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 154daf4d1072..eaf8f08f2e91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -342,10 +342,16 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = true;
}
- if (mdio)
+ if (mdio) {
plat->mdio_bus_data =
devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
return 0;
}
@@ -522,13 +528,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
/* clock setup */
- plat->stmmac_clk = devm_clk_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Cannot get CSR clock\n");
- plat->stmmac_clk = NULL;
+ if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
}
- clk_prepare_enable(plat->stmmac_clk);
plat->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
@@ -609,13 +617,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
* probe if needed before we went too far with resource allocation.
*/
stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res->irq < 0) {
- if (stmmac_res->irq != -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "MAC IRQ configuration information not found\n");
- }
+ if (stmmac_res->irq < 0)
return stmmac_res->irq;
- }
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
* The external wake up irq can be passed through the platform code
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index c48224973a37..173493db038c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -194,6 +194,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
+ if (priv->plat->ptp_max_adj)
+ stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
spin_lock_init(&priv->ptp_lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a97b1ea76438..ecc8602c6799 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -11,8 +11,10 @@
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
#include "stmmac.h"
struct stmmachdr {
@@ -43,6 +45,7 @@ struct stmmac_packet_attrs {
int size;
int remove_sa;
u8 id;
+ int sarc;
};
static u8 stmmac_test_next_id;
@@ -228,8 +231,11 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
goto out;
}
- if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (tpriv->packet->sarc) {
+ if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ goto out;
+ } else if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
goto out;
}
@@ -290,7 +296,9 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
tpriv->pt.dev = priv->dev;
tpriv->pt.af_packet_priv = tpriv;
tpriv->packet = attr;
- dev_add_pack(&tpriv->pt);
+
+ if (!attr->dont_wait)
+ dev_add_pack(&tpriv->pt);
skb = stmmac_test_get_udp_skb(priv, attr);
if (!skb) {
@@ -313,7 +321,8 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
ret = !tpriv->ok;
cleanup:
- dev_remove_pack(&tpriv->pt);
+ if (!attr->dont_wait)
+ dev_remove_pack(&tpriv->pt);
kfree(tpriv);
return ret;
}
@@ -700,6 +709,465 @@ cleanup:
return ret;
}
+static int stmmac_test_rss(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ if (!priv->dma_cap.rssen || !priv->rss.enable)
+ return -EOPNOTSUPP;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.exp_hash = true;
+ attr.sport = 0x321;
+ attr.dport = 0x123;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_vlan_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct iphdr *ihdr;
+ u16 proto;
+
+ proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+ if (tpriv->vlan_id) {
+ if (skb->vlan_proto != htons(proto))
+ goto out;
+ if (skb->vlan_tci != tpriv->vlan_id)
+ goto out;
+ }
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 1;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = true;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_8021Q);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 2;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
+ struct tc_cls_u32_offload cls_u32 = { };
+ struct stmmac_packet_attrs attr = { };
+ struct tc_action **actions, *act;
+ struct tc_u32_sel *sel;
+ struct tcf_exts *exts;
+ int ret, i, nk = 1;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.frpsel)
+ return -EOPNOTSUPP;
+
+ sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
+ if (!sel)
+ return -ENOMEM;
+
+ exts = kzalloc(sizeof(*exts), GFP_KERNEL);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto cleanup_sel;
+ }
+
+ actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ if (!actions) {
+ ret = -ENOMEM;
+ goto cleanup_exts;
+ }
+
+ act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ if (!act) {
+ ret = -ENOMEM;
+ goto cleanup_actions;
+ }
+
+ cls_u32.command = TC_CLSU32_NEW_KNODE;
+ cls_u32.common.chain_index = 0;
+ cls_u32.common.protocol = htons(ETH_P_ALL);
+ cls_u32.knode.exts = exts;
+ cls_u32.knode.sel = sel;
+ cls_u32.knode.handle = 0x123;
+
+ exts->nr_actions = nk;
+ exts->actions = actions;
+ for (i = 0; i < nk; i++) {
+ struct tcf_gact *gact = to_gact(&act[i]);
+
+ actions[i] = &act[i];
+ gact->tcf_action = TC_ACT_SHOT;
+ }
+
+ sel->nkeys = nk;
+ sel->offshift = 0;
+ sel->keys[0].off = 6;
+ sel->keys[0].val = htonl(0xdeadbeef);
+ sel->keys[0].mask = ~0x0;
+
+ ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+ if (ret)
+ goto cleanup_act;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.src = addr;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret; /* Shall NOT receive packet */
+
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+
+cleanup_act:
+ kfree(act);
+cleanup_actions:
+ kfree(actions);
+cleanup_exts:
+ kfree(exts);
+cleanup_sel:
+ kfree(sel);
+ return ret;
+}
+#else
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_desc_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x1;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_desc_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x2;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_reg_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_reg_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+ u16 proto;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = svlan;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = priv->dev->dev_addr;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
+ skb->protocol = htons(proto);
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanoff(struct stmmac_priv *priv)
+{
+ return stmmac_test_vlanoff_common(priv, false);
+}
+
+static int stmmac_test_svlanoff(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.dvlan)
+ return -EOPNOTSUPP;
+ return stmmac_test_vlanoff_common(priv, true);
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -745,6 +1213,46 @@ static const struct stmmac_test {
.name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
+ }, {
+ .name = "RSS ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rss,
+ }, {
+ .name = "VLAN Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt,
+ }, {
+ .name = "Double VLAN Filtering",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt,
+ }, {
+ .name = "Flexible RX Parser ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rxp,
+ }, {
+ .name = "SA Insertion (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sai,
+ }, {
+ .name = "SA Replacement (desc)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sar,
+ }, {
+ .name = "SA Insertion (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sai,
+ }, {
+ .name = "SA Replacement (reg)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sar,
+ }, {
+ .name = "VLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanoff,
+ }, {
+ .name = "SVLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_svlanoff,
},
};
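
The entries above extend the driver's ethtool self-test table, so the new tests are reachable through the standard ETH_SS_TEST interface. A hedged userspace sketch of listing and running them over the classic SIOCETHTOOL ioctl follows; the interface name comes from argv and the minimal error handling is illustrative. Running `ethtool -t <iface>` does the same job.

/* Illustrative userspace sketch, not part of this patch: list and run the
 * ethtool self-tests registered by the table above (equivalent to
 * "ethtool -t IFACE"). ETHTOOL_TEST needs CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_drvinfo drv = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *names;
	struct ethtool_test *test;
	struct ifreq ifr = { 0 };
	unsigned int i, n;
	int fd;

	if (argc < 2)
		return 1;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argv[1]);

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	ifr.ifr_data = (void *)&drv;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	n = drv.testinfo_len;		/* number of self-test result words */

	names = calloc(1, sizeof(*names) + n * ETH_GSTRING_LEN);
	test = calloc(1, sizeof(*test) + n * sizeof(__u64));
	if (!names || !test)
		return 1;

	names->cmd = ETHTOOL_GSTRINGS;
	names->string_set = ETH_SS_TEST;
	names->len = n;
	ifr.ifr_data = (void *)names;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	test->cmd = ETHTOOL_TEST;
	test->flags = 0;		/* 0 = online run; see ETH_TEST_FL_OFFLINE */
	test->len = n;
	ifr.ifr_data = (void *)test;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < n; i++)		/* a result of 0 means the test passed */
		printf("%-25.*s %lld\n", ETH_GSTRING_LEN,
		       (char *)names->data + i * ETH_GSTRING_LEN,
		       (long long)test->data[i]);

	return 0;
}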
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6fc05c106afc..c91876f8c536 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = off;
+ skb_frag_off_set(frag, off);
skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
@@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = 0;
+ skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
- tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
if (unlikely(tabort)) {
void *addr;
@@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
- addr + fragp->page_offset + len - tabort,
+ addr + skb_frag_off(fragp) + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
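
The cassini conversion above, and the niu, sunvnet, dwc-xlgmac, tehuti and netcp hunks below, switch from poking skb fragment fields directly to the skb_frag_t accessor helpers. A minimal sketch of the resulting pattern; the function name is illustrative and not taken from any of these drivers.

/* Illustrative only, not part of this patch: walking an skb's fragments
 * with the accessors used above. skb_frag_off() replaces direct
 * frag->page_offset reads; skb_frag_off_set() replaces the writes.
 */
#include <linux/skbuff.h>

static unsigned int example_frag_bytes(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		pr_debug("frag %u: page %p offset %u len %u\n", i,
			 skb_frag_page(frag), skb_frag_off(frag),
			 skb_frag_size(frag));
		total += skb_frag_size(frag);
	}

	return total;
}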
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0bc5863bffeb..f5fd1f3c07cc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_frag_size(frag);
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
- frag->page_offset, len,
+ skb_frag_off(frag), len,
DMA_TO_DEVICE);
rp->tx_buffs[prod].skb = NULL;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index baa3088b475c..8b94d9ad9e2b 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
blen = skb_frag_size(f);
blen += 8 - (blen & 7);
- err = ldc_map_single(lp, vaddr + f->page_offset,
+ err = ldc_map_single(lp, vaddr + skb_frag_off(f),
blen, cookies + nc, ncookies - nc,
map_perm);
kunmap_atomic(vaddr);
@@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- docopy |= f->page_offset & 7;
+ docopy |= skb_frag_off(f) & 7;
}
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
@@ -1532,8 +1532,7 @@ out_dropped:
else if (port)
del_timer(&port->clean_timer);
rcu_read_unlock();
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index 031cf9c3435a..8c4195a9a2cc 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
struct xlgmac_desc_data *desc_data;
unsigned int offset, datalen, len;
struct xlgmac_pkt_info *pkt_info;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int tso, vlan;
dma_addr_t skb_dma;
unsigned int i;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1f8e9601592a..a1f5a1e61040 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
struct sk_buff *skb,
struct xlgmac_pkt_info *pkt_info)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 5d6960fe3309..0f8a924fc60c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
frag = &skb_shinfo(skb)->frags[i];
db->wptr->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a46b8b2e44e1..f298d714efd6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2764,7 +2764,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct net_device *ndev;
struct cpsw_priv *priv;
void __iomem *ss_regs;
- struct resource *res, *ss_res;
+ struct resource *ss_res;
struct gpio_descs *mode;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
@@ -2799,8 +2799,7 @@ static int cpsw_probe(struct platform_device *pdev)
return PTR_ERR(ss_regs);
cpsw->regs = ss_regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cpsw->wr_regs = devm_ioremap_resource(dev, res);
+ cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cpsw->wr_regs))
return PTR_ERR(cpsw->wr_regs);
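
The cpsw hunk above, like the via-rhine hunk near the end of this diff, replaces the platform_get_resource() plus devm_ioremap_resource() pair with the devm_platform_ioremap_resource() wrapper. A minimal sketch with a hypothetical probe function:

/* Illustrative only, not part of this patch: the wrapper maps MEM resource
 * 'index' of a platform device in one call and is released automatically
 * on driver detach. The probe function below is hypothetical.
 */
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* was:
	 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	regs = devm_ioremap_resource(&pdev->dev, res);
	 */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}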
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 642843945031..1b2702f74455 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
- u32 page_offset = frag->page_offset;
+ u32 page_offset = skb_frag_off(frag);
u32 buf_len = skb_frag_size(frag);
dma_addr_t desc_dma;
u32 desc_dma_32;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 0f346761a2b2..538e70810d3d 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2311,11 +2311,9 @@ spider_net_alloc_card(void)
{
struct net_device *netdev;
struct spider_net_card *card;
- size_t alloc_size;
- alloc_size = sizeof(struct spider_net_card) +
- (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
- netdev = alloc_etherdev(alloc_size);
+ netdev = alloc_etherdev(struct_size(card, darray,
+ tx_descriptors + rx_descriptors));
if (!netdev)
return NULL;
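
spider_net's allocation now uses the struct_size() helper instead of an open-coded sizeof sum for the trailing descriptor array. A small sketch of the idiom; the struct and function names are hypothetical.

/* Illustrative only, not part of this patch: struct_size(p, member, n)
 * computes sizeof(*p) + n * sizeof(*p->member) for a trailing flexible
 * array, saturating to SIZE_MAX on overflow instead of wrapping.
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_card {
	unsigned int ndescr;
	u64 darray[];			/* trailing array, as in spider_net_card */
};

static struct example_card *example_alloc(unsigned int ndescr)
{
	struct example_card *card;

	card = kzalloc(struct_size(card, darray, ndescr), GFP_KERNEL);
	if (card)
		card->ndescr = ndescr;

	return card;
}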
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ab55416a10fa..ed12dbd156f0 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1127,15 +1127,13 @@ static int rhine_init_one_platform(struct platform_device *pdev)
const struct of_device_id *match;
const u32 *quirks;
int irq;
- struct resource *res;
void __iomem *ioaddr;
match = of_match_device(rhine_of_tbl, &pdev->dev);
if (!match)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);