Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 8
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 6
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 107
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 33
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 10
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 8
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 10
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r-- drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 10
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 12
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 11
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1457
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 187
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 39
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 197
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 195
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 109
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 181
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 23
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 23
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 7
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 9
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 21
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r-- drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 39
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 54
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 86
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 97
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.c | 40
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 19
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 97
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 101
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 78
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 46
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 363
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 38
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 148
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 43
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 60
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 63
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 414
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 19
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 151
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1052
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 71
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 47
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 100
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 10
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 12
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 10
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 10
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 10
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 5
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 11
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 23
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 24
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 15
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 9
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 33
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 110
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 22
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 80
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 155
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 101
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 30
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 146
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.c | 112
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 200
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 144
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 34
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 565
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 655
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 57
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 56
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 161
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 543
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 39
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.c | 5
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 14
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9
-rw-r--r-- drivers/net/ethernet/jme.c | 15
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 75
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 45
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_path.c | 71
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 999
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 123
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/crdump.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 102
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 114
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 67
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 205
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 162
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 404
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 256
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 187
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 316
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 88
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 47
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1068
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 45
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 222
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 89
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 120
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 446
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c | 371
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h | 104
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 240
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 67
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 267
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 7
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 7
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 401
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 49
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 145
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ptp.h | 41
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_regs.c | 11
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 160
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 25
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 197
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 200
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 3
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/qlogic/Makefile | 1
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 82
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 32
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 34
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 122
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 61
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 35
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 117
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/Makefile | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 2353
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2024
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 735
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5027
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1285
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 13
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_uart.c | 3
-rw-r--r-- drivers/net/ethernet/realtek/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 1087
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/falcon/falcon_boards.c | 18
-rw-r--r-- drivers/net/ethernet/sfc/falcon/rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 1038
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 3
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 68
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 3
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 132
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 787
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 114
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 41
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 47
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc.h | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 192
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 23
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 105
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 435
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 173
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 516
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 8
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.c | 7
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 2
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 6
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 4
371 files changed, 18022 insertions, 18051 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 147051404194..8785c2ff3825 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev)
static int vortex_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (!ndev || !netif_running(ndev))
return 0;
@@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev)
static int vortex_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
int err;
if (!ndev || !netif_running(ndev))
@@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
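The two PM hunks above replace the to_pci_dev()/pci_get_drvdata() pair with a single dev_get_drvdata() call. This works because pci_set_drvdata() is itself a thin wrapper around dev_set_drvdata() on the embedded struct device, so the pointer can be read back through the generic accessor. A minimal sketch of the resulting suspend callback, using a hypothetical foo_ driver name:

static int foo_suspend(struct device *dev)
{
        /* probe() stored the netdev with pci_set_drvdata(), which
         * lands in dev->driver_data, so no PCI detour is needed.
         */
        struct net_device *ndev = dev_get_drvdata(dev);

        if (!ndev || !netif_running(ndev))
                return 0;

        netif_device_detach(ndev);
        return 0;
}
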
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 010a2f48aea5..2a9f8643629c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size, true);
+ skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
}
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index edbb4b3604c7..174344c450af 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
- struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
frag++;
}
} else {
- desc[frag].len_vlan = frags[i - 1].size;
+ desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
&frags[i - 1],
0,
- frags[i - 1].size,
+ desc[frag].len_vlan,
DMA_TO_DEVICE);
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
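The greth and et131x hunks above are part of a tree-wide conversion that makes skb fragments opaque: struct skb_frag_struct becomes the skb_frag_t typedef, and direct field reads such as frag->size go through skb_frag_size(). A minimal sketch of a TX mapping loop written against the accessor API (the foo_ name and the descriptor setup are illustrative, not from this patch):

static void foo_map_frags(struct device *dev, struct sk_buff *skb)
{
        int f;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                unsigned int len = skb_frag_size(frag);
                dma_addr_t addr;

                /* skb_frag_dma_map() wraps dma_map_page() using the
                 * fragment's page, offset and length.
                 */
                addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
                /* program len/addr into the hardware descriptor here */
        }
}
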
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 650d1bae5f56..1793950f0582 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 87ff5d6d1b22..c6c2a54c1121 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -697,16 +697,14 @@ static void ni65_free_buffer(struct priv *p)
for(i=0;i<TMDNUM;i++) {
kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i])
- dev_kfree_skb(p->tmd_skb[i]);
+ dev_kfree_skb(p->tmd_skb[i]);
#endif
}
for(i=0;i<RMDNUM;i++)
{
#ifdef RCV_VIA_SKB
- if(p->recv_skb[i])
- dev_kfree_skb(p->recv_skb[i]);
+ dev_kfree_skb(p->recv_skb[i]);
#else
kfree(p->recvbounce[i]);
#endif
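The guard removals above are safe because dev_kfree_skb() resolves to consume_skb(), which, like kfree_skb() and kfree(), simply returns when handed a NULL pointer. So

        if (p->tmd_skb[i])
                dev_kfree_skb(p->tmd_skb[i]);

and the bare

        dev_kfree_skb(p->tmd_skb[i]);

are equivalent; the second form is the preferred kernel style.
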
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b91143947ed2..b0a6c96b6ef4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -438,7 +438,6 @@ static const struct file_operations xi2c_reg_value_fops = {
void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
/* Set defaults */
@@ -451,88 +450,48 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
return;
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (!pdata->xgbe_debugfs) {
- netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
- kfree(buf);
- return;
- }
- pfile = debugfs_create_file("xgmac_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
- pfile = debugfs_create_file("xgmac_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xgmac_reg_value_fops);
- pfile = debugfs_create_file("xpcs_mmd", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_mmd_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
- pfile = debugfs_create_file("xpcs_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
- pfile = debugfs_create_file("xpcs_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xpcs_reg_value_fops);
if (pdata->xprop_regs) {
- pfile = debugfs_create_file("xprop_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xprop_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xprop_reg_addr_fops);
+
+ debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
}
if (pdata->xi2c_regs) {
- pfile = debugfs_create_file("xi2c_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xi2c_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xi2c_reg_addr_fops);
+
+ debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
}
if (pdata->vdata->an_cdr_workaround) {
- pfile = debugfs_create_bool("an_cdr_workaround", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_workaround);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
-
- pfile = debugfs_create_bool("an_cdr_track_early", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_track_early);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
+ debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+
+ debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
}
kfree(buf);
@@ -546,7 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
if (!pdata->xgbe_debugfs)
@@ -559,11 +517,8 @@ void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
goto out;
- pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent,
- pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_rename failed\n");
+ debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
+ pdata->xgbe_debugfs->d_parent, buf);
out:
kfree(buf);
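The xgbe debugfs cleanup above follows the debugfs maintainer's rule that callers should not check the return values of debugfs_create_*(): the files are best-effort diagnostics, and a failed or error-pointer parent simply makes the child calls fail quietly without ever affecting driver operation. The resulting idiom, sketched for a hypothetical driver (foo_pdata and foo_regs_fops are illustrative):

static void foo_debugfs_init(struct foo_pdata *pdata)
{
        /* No error checks: if the directory cannot be created, the
         * ERR_PTR parent makes the calls below no-ops as well.
         */
        pdata->dbg_dir = debugfs_create_dir("foo", NULL);

        debugfs_create_file("regs", 0600, pdata->dbg_dir, pdata,
                            &foo_regs_fops);
        debugfs_create_bool("workaround", 0600, pdata->dbg_dir,
                            &pdata->workaround);
}
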
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 533094233659..230726d7b74f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t skb_dma;
unsigned int start_index, cur_index;
unsigned int offset, tso, vlan, datalen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3dd0cecddba8..98f8f2033154 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, struct sk_buff *skb,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index d0f3dfb88202..4ebd2410185a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -301,7 +301,6 @@ static int xgbe_platform_probe(struct platform_device *pdev)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
- struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
@@ -353,8 +352,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
/* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
@@ -363,8 +361,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
@@ -373,8 +370,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
@@ -383,8 +380,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
@@ -393,8 +390,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
@@ -467,10 +464,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
@@ -479,12 +474,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- dma_irqnum - 1);
+ if (ret < 0)
goto err_io;
- }
pdata->channel_irq[i] = ret;
}
@@ -496,10 +487,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->an_irq = ret;
/* Configure the netdev resource */
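The devm_platform_ioremap_resource() calls used throughout the hunk above are a convenience wrapper for exactly the two-line pattern being removed; its body is essentially:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                             unsigned int index)
{
        struct resource *res;

        /* Look up the index'th MEM resource, then request and ioremap
         * it with device-managed lifetime.
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
}

so converted callers keep identical semantics while dropping the local struct resource variable.
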
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 79048cc46703..02b4f3af02b5 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Unable to get irq\n");
+ if (ret < 0)
return ret;
- }
pdata->resources.irq = ret;
return 0;
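The dropped dev_err() above (and the same change in several drivers below) is possible because platform_get_irq() now prints a device-prefixed error message itself on failure, except in the -EPROBE_DEFER case where silence is wanted anyway; callers only need to propagate the code:

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;     /* platform_get_irq() already logged it */
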
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 61a465097cb8..5f657879134e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
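The ACPI reset hunks (here and in the sgmac/xgmac files below) drop the acpi_has_method() probe and evaluate _RST directly: acpi_evaluate_object() returns a failure status such as AE_NOT_FOUND when the method does not exist, so checking ACPI_FAILURE() preserves the old fallback while saving one namespace lookup. The shape of the pattern, with handle standing in for ACPI_HANDLE(&pdev->dev):

        acpi_status status;

        /* Prefer the reset method; on any failure, including the
         * method being absent, fall back to _INI.
         */
        status = acpi_evaluate_object(handle, "_RST", NULL, NULL);
        if (ACPI_FAILURE(status))
                acpi_evaluate_object(handle, "_INI", NULL, NULL);
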
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 10b1c053e70a..d8612131c55e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < 2 && i < nr_frags; i++)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(
+ &skb_shinfo(skb)->frags[i]);
/* HW requires header must reside in 3 buffer */
if (unlikely(hdr_len > len)) {
@@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev = pdata->pdev;
- struct device *dev = &pdev->dev;
int i, ret, max_irqs;
if (phy_interface_mode_is_rgmii(pdata->phy_mode))
@@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- dev_err(dev, "Unable to get ENET IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ return ret ? : -ENXIO;
}
pdata->irqs[i] = ret;
}
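The `return ret ? : -ENXIO;` above uses the GCC conditional-operator extension with an omitted middle operand: it evaluates to ret when ret is non-zero (a genuine error code from platform_get_irq()) and to -ENXIO when ret is 0, i.e. when no usable IRQ number came back. Spelled out:

        if (ret)
                return ret;     /* propagate the real error code */
        return -ENXIO;          /* ret == 0: no usable IRQ */
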
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 6453fc2ebb1f..f482ced2cadd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
- acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
- "_RST", NULL, NULL);
- else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
"_INI", NULL, NULL);
+ }
#endif
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 133eb91c542e..304b5d43f236 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index c40daad515d5..a58185b1d8bf 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -815,8 +815,8 @@ static int reverse6[64] = {
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
- register unsigned int counter, cur = curval, next = nxtval;
- register int high_crc_set, low_data_set;
+ unsigned int counter, cur = curval, next = nxtval;
+ int high_crc_set, low_data_set;
/* Swap bytes */
next = ((next & 0x00FF) << 8) | (next >> 8);
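The bmac hunk simply drops the register storage-class specifier, which modern compilers treat as a no-op hint (and which C++17 removed outright), so the generated code is unchanged in practice:

        /* before */
        register unsigned int counter, cur = curval, next = nxtval;
        /* after: same object code with current gcc/clang */
        unsigned int counter, cur = curval, next = nxtval;
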
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 6703960c7cf5..7548247455d7 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
return -ENOMEM;
}
- rx->buf = &tx->buf[BIT(tx->order)];
+ rx->buf = &tx->buf[tx_size];
rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
@@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
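resource_size() is the helper form of the open-coded `res->end - res->start + 1` being replaced; resource ranges are inclusive of both endpoints, which is exactly the off-by-one the helper exists to get right. Its definition in include/linux/ioport.h is essentially:

static inline resource_size_t resource_size(const struct resource *res)
{
        /* [start, end] is an inclusive range, hence the + 1 */
        return res->end - res->start + 1;
}
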
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e3538ba7d0e7..d4bbcdfd691a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->len = cpu_to_le16(maplen);
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
txq->write_idx = 0;
@@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
@@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev)
static int alx_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
struct alx_hw *hw = &alx->hw;
int err;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be7f9cebb675..2b239ecea05f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
@@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev)
static int atl1c_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 wufc = adapter->wol;
@@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev)
if (wufc)
if (atl1c_phy_to_ps_link(hw) != 0)
- dev_dbg(&pdev->dev, "phy power saving failed");
+ dev_dbg(dev, "phy power saving failed");
atl1c_power_saving(hw, wufc);
@@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7f14e010bfeb..4f7b65825c15 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i;
u16 seg_num;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b5c6dc914720..b498fd6a47d0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i, nseg;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
@@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
@@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev)
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
+ dev_printk(KERN_DEBUG, dev,
"error getting speed/duplex\n");
goto disable_wol;
}
@@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev)
static int atl1_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 3b3370a94a9c..37752d9514e7 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev)
ops = match->data;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "No IRQ\n");
+ if (irq <= 0)
return -EINVAL;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 291e4afd4a1a..620cd3fc1fbc 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+ struct resource *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
int i, ret;
@@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out;
@@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = {
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *p[3];
unsigned int i;
memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
for (i = 0; i < 3; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- p[i] = devm_ioremap_resource(&pdev->dev, res);
+ p[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(p[i]))
return PTR_ERR(p[i]);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9483553ce444..7df887e4024c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -708,8 +708,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
skb = bcm_sysport_rx_refill(priv, cb);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -2420,12 +2419,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
struct device_node *dn;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
u32 txq, rxq;
int ret;
dn = pdev->dev.of_node;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
of_id = of_match_node(bcm_sysport_of_match, dn);
if (!of_id || !of_id->data)
return -EINVAL;
@@ -2473,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_free_netdev;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6dc0dd91ad11..c46c1b1416f7 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
bgmac->irq = platform_get_irq(pdev, 0);
- if (bgmac->irq < 0) {
- dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ if (bgmac->irq < 0)
return bgmac->irq;
- }
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
if (!regs) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 4632dd5dbad1..148734b166f0 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
flags = 0;
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
index = (index + 1) % BGMAC_TX_RING_SLOTS;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dfdd14eadd57..fbc196b480b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev)
static int
bnx2_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev)) {
@@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device)
static int
bnx2_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8dce4069472b..402d9f50d92c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -116,6 +116,9 @@ enum board_idx {
BCM57508,
BCM57504,
BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -161,6 +164,9 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
+ [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
+ [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -242,6 +254,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -828,16 +842,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons;
struct rx_agg_cmp *agg;
@@ -845,8 +884,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
struct rx_bd *prod_bd;
struct page *page;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, start + i);
cons = agg->rx_agg_cmp_opaque;
__clear_bit(cons, rxr->rx_agg_bmap);
@@ -874,7 +915,6 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -888,7 +928,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
{
unsigned int payload = offset_and_len >> 16;
unsigned int len = offset_and_len & 0xffff;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct page *page = data;
u16 prod = rxr->rx_prod;
struct sk_buff *skb;
@@ -919,7 +959,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
frag = &skb_shinfo(skb)->frags[0];
skb_frag_size_sub(frag, payload);
- frag->page_offset += payload;
+ skb_frag_off_add(frag, payload);
skb->data_len -= payload;
skb->tail += payload;
@@ -957,15 +997,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 cp_cons,
- u32 agg_bufs)
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
@@ -973,8 +1017,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, i);
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1008,7 +1054,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
return NULL;
}
@@ -1021,7 +1067,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
skb->truesize += PAGE_SIZE;
prod = NEXT_RX_AGG(prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
return skb;
@@ -1081,9 +1126,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >>
- RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
}
if (agg_bufs) {
@@ -1094,6 +1140,14 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
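+/* The PF has its own dedicated workqueue for reset work; VFs use the
+ * system workqueue.
+ */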
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
static void bnxt_queue_sp_work(struct bnxt *bp)
{
if (BNXT_PF(bp))
@@ -1120,26 +1174,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
rxr->rx_next_cons = 0xffff;
}
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_P5_MASK;
+
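+ /* Use the low bits of the hardware agg_id as a hint; on a collision,
+ * fall back to the first free index in the bitmap.
+ */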
+ if (test_bit(idx, map->agg_idx_bmap))
+ idx = find_first_zero_bit(map->agg_idx_bmap,
+ BNXT_AGG_IDX_BMAP_SIZE);
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
- u8 agg_id = TPA_START_AGG_ID(tpa_start);
- u16 cons, prod;
- struct bnxt_tpa_info *tpa_info;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
struct rx_bd *prod_bd;
dma_addr_t mapping;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_START_AGG_ID_P5(tpa_start);
+ agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ } else {
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ }
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id];
- if (unlikely(cons != rxr->rx_next_cons)) {
- netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1184,6 +1272,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -1195,13 +1284,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
+#ifdef CONFIG_INET
+static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
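+ /* Locate the outer UDP header, if any, and set the matching UDP
+ * tunnel GSO type based on its checksum field.
+ */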
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+#endif
+
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
int payload_off, int tcp_ts,
struct sk_buff *skb)
@@ -1259,28 +1372,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
}
if (inner_mac_off) { /* tunnel */
- struct udphdr *uh = NULL;
__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
ETH_HLEN - 2));
- if (proto == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
+ bnxt_gro_tunnel(skb, proto);
+ }
+#endif
+ return skb;
+}
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnxt_gro_tunnel(skb, proto);
}
#endif
return skb;
@@ -1327,28 +1451,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
return NULL;
}
- if (nw_off) { /* tunnel */
- struct udphdr *uh = NULL;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
- }
+ if (nw_off) /* tunnel */
+ bnxt_gro_tunnel(skb, skb->protocol);
#endif
return skb;
}
@@ -1371,9 +1475,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
+ else
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
if (likely(skb))
tcp_gro_complete(skb);
@@ -1401,14 +1506,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
- u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ u16 idx = 0, agg_id;
void *data;
+ bool gro;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
@@ -1418,26 +1523,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- tpa_info = &rxr->rx_tpa[agg_id];
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_END_AGG_ID_P5(tpa_end);
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNXT_AGG_EVENT;
+ bnxt_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ gro = !!(bp->flags & BNXT_FLAG_GRO);
+ } else {
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ idx = RING_CMP(*raw_cons);
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *event |= BNXT_AGG_EVENT;
+ idx = NEXT_CMP(idx);
+ }
+ gro = !!TPA_END_GRO(tpa_end);
+ }
data = tpa_info->data;
data_ptr = tpa_info->data_ptr;
prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
-
- if (agg_bufs) {
- if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
- return ERR_PTR(-EBUSY);
-
- *event |= BNXT_AGG_EVENT;
- cp_cons = NEXT_CMP(cp_cons);
- }
-
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1447,7 +1569,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
} else {
@@ -1456,7 +1578,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
@@ -1471,7 +1593,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1479,7 +1601,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1508,12 +1630,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
}
- if (TPA_END_GRO(tpa_end))
+ if (gro)
skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
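+/* On P5 chips, TPA aggregation completions arrive on the completion ring
+ * ahead of the TPA end completion; buffer them in the flow's agg_arr
+ * until then.
+ */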
+static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnxt_tpa_info *tpa_info;
+
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
@@ -1555,6 +1689,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
@@ -1563,8 +1704,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
- cmp_type = RX_CMP_TYPE(rxcmp);
-
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1623,7 +1762,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1646,7 +1786,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
rc = -ENOMEM;
goto next_rx;
}
@@ -1666,7 +1807,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1765,6 +1906,33 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->regs[reg_idx];
+ u32 reg_type, reg_off, val = 0;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
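+ /* GRC registers are pre-mapped into BAR0 by bnxt_map_fw_health_regs(),
+ * so the GRC case falls through to a BAR0 read at the mapped offset.
+ */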
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_read_config_dword(bp->pdev, reg_off, &val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ reg_off = fw_health->mapped_regs[reg_idx];
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ val = readl(bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ val = readl(bp->bar1 + reg_off);
+ break;
+ }
+ if (reg_idx == BNXT_FW_RESET_INPROG_REG)
+ val &= fw_health->fw_reset_inprog_reg_mask;
+ return val;
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1820,6 +1988,55 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ bp->fw_reset_timestamp = jiffies;
+ bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
+ if (!bp->fw_reset_min_dsecs)
+ bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
+ bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
+ if (!bp->fw_reset_max_dsecs)
+ bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+ if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else {
+ netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
+ bp->fw_reset_max_dsecs * 100);
+ }
+ set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ if (!fw_health)
+ goto async_event_process_exit;
+
+ fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ if (!fw_health->enabled)
+ break;
+
+ if (netif_msg_drv(bp))
+ netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+ fw_health->enabled, fw_health->master,
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_CNT_REG),
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_HEALTH_REG));
+ fw_health->tmr_multiplier =
+ DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ bp->current_interval * 10);
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2325,10 +2542,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_tpa_idx_map *map;
int j;
if (rxr->rx_tpa) {
- for (j = 0; j < MAX_TPA; j++) {
+ for (j = 0; j < bp->max_tpa; j++) {
struct bnxt_tpa_info *tpa_info =
&rxr->rx_tpa[j];
u8 *data = tpa_info->data;
@@ -2395,6 +2613,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
}
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
}
@@ -2483,6 +2704,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ kfree(rxr->rx_tpa[0].agg_arr);
+ rxr->rx_tpa[0].agg_arr = NULL;
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+ int i, j, total_aggs = 0;
+
+ bp->max_tpa = MAX_TPA;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (!bp->max_tpa_v2)
+ return 0;
+ bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ }
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct rx_agg_cmp *agg;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
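+ /* One contiguous allocation holds the aggregation entries for all
+ * TPA slots on this ring; each rx_tpa[j].agg_arr points into its
+ * own MAX_SKB_FRAGS-sized slice.
+ */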
+ agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+ rxr->rx_tpa[0].agg_arr = agg;
+ if (!agg)
+ return -ENOMEM;
+ for (j = 1; j < bp->max_tpa; j++)
+ rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
@@ -2490,6 +2766,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (!bp->rx_ring)
return;
+ bnxt_free_tpa_info(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2503,9 +2780,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
-
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -2539,7 +2813,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc, agg_rings = 0, tpa_rings = 0;
+ int i, rc = 0, agg_rings = 0;
if (!bp->rx_ring)
return -ENOMEM;
@@ -2547,9 +2821,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
agg_rings = 1;
- if (bp->flags & BNXT_FLAG_TPA)
- tpa_rings = 1;
-
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2591,17 +2862,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
-
- if (tpa_rings) {
- rxr->rx_tpa = kcalloc(MAX_TPA,
- sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- }
}
}
- return 0;
+ if (bp->flags & BNXT_FLAG_TPA)
+ rc = bnxt_alloc_tpa_info(bp);
+ return rc;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
@@ -2953,7 +3218,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
dma_addr_t mapping;
- for (i = 0; i < MAX_TPA; i++) {
+ for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_data(bp, &mapping,
GFP_KERNEL);
if (!data)
@@ -3376,6 +3641,9 @@ static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_cmd_kong_resp_addr)
+ return 0;
+
bp->hwrm_cmd_kong_resp_addr =
dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&bp->hwrm_cmd_kong_resp_dma_addr,
@@ -3415,6 +3683,9 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_short_cmd_req_addr)
+ return 0;
+
bp->hwrm_short_cmd_req_addr =
dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
@@ -3468,7 +3739,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
if (!bp->bnapi)
return;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3487,7 +3758,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3869,6 +4140,32 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
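+/* Map HWRM firmware error codes to the closest standard errno values. */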
+static int bnxt_hwrm_to_stderr(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
@@ -3886,6 +4183,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
+
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
!bp->hwrm_short_cmd_req_addr)
@@ -3950,6 +4250,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ if (!pci_is_enabled(bp->pdev))
+ return 0;
+
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
/* convert timeout to usec */
@@ -3981,9 +4284,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(req->req_type));
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ le16_to_cpu(req->req_type));
+ return -EBUSY;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
@@ -4007,11 +4311,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (i >= tmo_count) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len);
+ return -EBUSY;
}
/* Last byte of resp contains valid bit */
@@ -4025,11 +4330,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len, *valid);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len,
+ *valid);
+ return -EBUSY;
}
}
@@ -4043,7 +4350,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
- return rc;
+ return bnxt_hwrm_to_stderr(rc);
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -4092,9 +4399,14 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
- for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
- __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ continue;
+ __set_bit(event_id, async_events_bmap);
+ }
if (bmap && bmap_size) {
for (i = 0; i < bmap_size; i++) {
if (test_bit(i, bmap))
@@ -4112,6 +4424,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ u32 flags;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -4121,7 +4434,11 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
- req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
+ FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
req.ver_upd_8b = DRV_VER_UPD;
@@ -4156,10 +4473,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
- else if (resp->flags &
- cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4414,6 +4729,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input req = {0};
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
@@ -4453,9 +4769,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- segs = ilog2(nsegs);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ segs = MAX_TPA_SEGS_P5;
+ max_aggs = bp->max_tpa;
+ } else {
+ segs = ilog2(nsegs);
+ }
req.max_agg_segs = cpu_to_le16(segs);
- req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ req.max_aggs = cpu_to_le16(max_aggs);
req.min_agg_len = cpu_to_le32(512);
}
@@ -4576,7 +4897,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -EIO;
+ return rc;
}
return 0;
}
@@ -4739,8 +5060,6 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
return rc;
@@ -4800,6 +5119,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
struct hwrm_vnic_qcaps_input req = {0};
int rc;
+ bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -4815,6 +5136,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bp->max_tpa_v2)
+ bp->hw_ring_stats_size =
+ sizeof(struct ctx_hw_stats_ext);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4874,8 +5199,6 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5194,6 +5517,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req.ring_type = ring_type;
req.ring_id = cpu_to_le16(ring->fw_ring_id);
@@ -5331,7 +5657,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
@@ -5495,7 +5821,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
@@ -5520,7 +5846,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, stats, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
rc = bnxt_hwrm_get_rings(bp);
return rc;
@@ -5701,9 +6027,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5731,9 +6055,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5995,8 +6317,6 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -6016,6 +6336,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -6057,6 +6378,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ } else {
+ bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
flags = le16_to_cpu(resp->flags);
@@ -6292,8 +6615,6 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -6555,10 +6876,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -EIO;
+ if (rc)
goto hwrm_func_resc_qcaps_exit;
- }
hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
if (!all)
@@ -6626,6 +6945,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
@@ -6657,6 +6978,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
@@ -6726,6 +7048,103 @@ hwrm_cfa_adv_qcaps_exit:
return rc;
}
+static int bnxt_map_fw_health_regs(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_base = 0xffffffff;
+ int i;
+
+ /* Only pre-map the monitoring GRC registers using window 3 */
+ for (i = 0; i < 4; i++) {
+ u32 reg = fw_health->regs[i];
+
+ if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
+ continue;
+ if (reg_base == 0xffffffff)
+ reg_base = reg & BNXT_GRC_BASE_MASK;
+ if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
+ (reg & BNXT_GRC_OFFSET_MASK);
+ }
+ if (reg_base == 0xffffffff)
+ return 0;
+
+ writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+ return 0;
+}
+
+static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+ struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_error_recovery_qcfg_input req = {0};
+ int rc, i;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto err_recovery_out;
+ if (!fw_health) {
+ fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
+ bp->fw_health = fw_health;
+ if (!fw_health) {
+ rc = -ENOMEM;
+ goto err_recovery_out;
+ }
+ }
+ fw_health->flags = le32_to_cpu(resp->flags);
+ if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
+ !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
+ fw_health->master_func_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period);
+ fw_health->normal_func_wait_dsecs =
+ le32_to_cpu(resp->normal_func_wait_period);
+ fw_health->post_reset_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period_after_reset);
+ fw_health->post_reset_max_wait_dsecs =
+ le32_to_cpu(resp->max_bailout_time_after_reset);
+ fw_health->regs[BNXT_FW_HEALTH_REG] =
+ le32_to_cpu(resp->fw_health_status_reg);
+ fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
+ le32_to_cpu(resp->fw_heartbeat_reg);
+ fw_health->regs[BNXT_FW_RESET_CNT_REG] =
+ le32_to_cpu(resp->fw_reset_cnt_reg);
+ fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
+ le32_to_cpu(resp->reset_inprogress_reg);
+ fw_health->fw_reset_inprog_reg_mask =
+ le32_to_cpu(resp->reset_inprogress_reg_mask);
+ fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
+ if (fw_health->fw_reset_seq_cnt >= 16) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
+ fw_health->fw_reset_seq_regs[i] =
+ le32_to_cpu(resp->reset_reg[i]);
+ fw_health->fw_reset_seq_vals[i] =
+ le32_to_cpu(resp->reset_reg_val[i]);
+ fw_health->fw_reset_seq_delay_msec[i] =
+ resp->delay_after_reset[i];
+ }
+err_recovery_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc)
+ rc = bnxt_map_fw_health_regs(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+}
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -6785,20 +7204,30 @@ qportcfg_exit:
return rc;
}
-static int bnxt_hwrm_ver_get(struct bnxt *bp)
+static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
- int rc;
struct hwrm_ver_get_input req = {0};
- struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
+ int rc;
- bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
+ silent);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;
+ int rc;
+
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = __bnxt_hwrm_ver_get(bp, false);
if (rc)
goto hwrm_ver_get_exit;
@@ -7001,6 +7430,8 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
@@ -7066,8 +7497,6 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
else
return -EINVAL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7087,8 +7516,6 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7971,6 +8398,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_link_info *link_info = &bp->link_info;
+ bp->flags &= ~BNXT_FLAG_EEE_CAP;
+ if (bp->test_info)
+ bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -8292,11 +8722,14 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_fw_init_one(struct bnxt *bp);
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
- bool resc_reinit = false;
+ bool resc_reinit = false, fw_reset = false;
+ u32 flags = 0;
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
@@ -8307,26 +8740,57 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc && (resp->flags &
- cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
- resc_reinit = true;
+ if (!rc)
+ flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ return rc;
- if (up && resc_reinit && BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ if (!up)
+ return 0;
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
+ resc_reinit = true;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ fw_reset = true;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
+ netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
+ return -ENODEV;
}
- return rc;
+ if (resc_reinit || fw_reset) {
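+ /* Firmware completed a hot reset while the interface was down;
+ * re-initialize it and re-discover the interrupt mode before
+ * reserving resources.
+ */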
+ if (fw_reset) {
+ rc = bnxt_fw_init_one(bp);
+ if (rc) {
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ return rc;
+ }
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "init int mode failed\n");
+ return rc;
+ }
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ if (BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+ }
+ }
+ return 0;
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@ -8336,6 +8800,7 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
+ bp->num_leds = 0;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
return 0;
@@ -8430,6 +8895,7 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
{
u16 handle = 0;
+ bp->wol = 0;
if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
return;
@@ -8476,6 +8942,9 @@ static void bnxt_hwmon_open(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwmon_dev)
+ return;
+
bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
DRV_MODULE_NAME, bp,
bnxt_groups);
@@ -8741,12 +9210,28 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc;
- bnxt_hwrm_if_change(bp, true);
- rc = __bnxt_open_nic(bp, true, true);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_if_change(bp, true);
if (rc)
+ return rc;
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc) {
bnxt_hwrm_if_change(bp, false);
+ } else {
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
+ BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- bnxt_hwmon_open(bp);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ bnxt_hwmon_open(bp);
+ }
return rc;
}
@@ -8783,6 +9268,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
+ pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
+
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -8799,6 +9288,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ /* If we get here, it means firmware reset is in progress
+ * while we are trying to close. We can safely proceed with
+ * the close because we are holding rtnl_lock(). Some firmware
+ * messages may fail as we proceed to close. We set the
+ * ABORT_ERR flag here so that the FW reset thread will later
+ * abort when it gets the rtnl_lock() and sees the flag.
+ */
+ netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ }
+
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
@@ -9306,7 +9807,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & BNXT_FLAG_TPA) {
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
- (flags & BNXT_FLAG_TPA) == 0)
+ (flags & BNXT_FLAG_TPA) == 0 ||
+ (bp->flags & BNXT_FLAG_CHIP_P5))
re_init = true;
}
@@ -9316,9 +9818,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (flags != bp->flags) {
u32 old_flags = bp->flags;
- bp->flags = flags;
-
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return rc;
@@ -9326,12 +9827,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (re_init) {
bnxt_close_nic(bp, false, false);
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return bnxt_open_nic(bp, false, false);
}
if (update_tpa) {
+ bp->flags = flags;
rc = bnxt_set_tpa(bp,
(flags & BNXT_FLAG_TPA) ?
true : false);
@@ -9438,6 +9941,38 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
+static void bnxt_fw_health_check(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 val;
+
+ if (!fw_health || !fw_health->enabled ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ if (fw_health->tmr_counter) {
+ fw_health->tmr_counter--;
+ return;
+ }
+
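+ /* The firmware is presumed dead if its heartbeat stops advancing
+ * or its reset counter changes behind our back.
+ */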
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ goto fw_reset;
+
+ fw_health->last_fw_heartbeat = val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ goto fw_reset;
+
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ return;
+
+fw_reset:
+ set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+}
+
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -9449,6 +9984,9 @@ static void bnxt_timer(struct timer_list *t)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ bnxt_fw_health_check(bp);
+
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
@@ -9504,6 +10042,130 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_reset_close(struct bnxt *bp)
+{
+ __bnxt_close_nic(bp, true, false);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+}
+
+static bool is_bnxt_fw_ok(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ bool no_heartbeat = false, has_reset = false;
+ u32 val;
+
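+ /* After an exception, the firmware is only "ok" if its heartbeat is
+ * still advancing and the reset counter shows it already reset itself.
+ */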
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ no_heartbeat = true;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ has_reset = true;
+
+ if (!no_heartbeat && has_reset)
+ return true;
+
+ return false;
+}
+
+/* rtnl_lock is acquired before calling this function */
+static void bnxt_force_fw_reset(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 wait_dsecs;
+
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_fw_reset_close(bp);
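+ /* The master function drives the actual reset; other functions wait
+ * for the master to finish before re-enabling the device.
+ */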
+ wait_dsecs = fw_health->master_func_wait_dsecs;
+ if (fw_health->master) {
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
+ wait_dsecs = 0;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ } else {
+ bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
+ wait_dsecs = fw_health->normal_func_wait_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ }
+ bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+}
+
+void bnxt_fw_exception(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_rtnl_lock_sp(bp);
+ bnxt_force_fw_reset(bp);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
+/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
+ * < 0 on error.
+ */
+static int bnxt_get_registered_vfs(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ int rc;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
+ return rc;
+ }
+ if (bp->pf.registered_vfs)
+ return bp->pf.registered_vfs;
+ if (bp->sriov_cfg)
+ return 1;
+#endif
+ return 0;
+}
+
+void bnxt_fw_reset(struct bnxt *bp)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+ !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ int n = 0;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->pf.active_vfs &&
+ !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ n = bnxt_get_registered_vfs(bp);
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
+ n);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ goto fw_reset_exit;
+ } else if (n > 0) {
+ u16 vf_tmo_dsecs = n * 10;
+
+ if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
+ bp->fw_reset_max_dsecs = vf_tmo_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_VF;
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ goto fw_reset_exit;
+ }
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ }
+fw_reset_exit:
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
@@ -9634,6 +10296,15 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
+ bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+
+ if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
+ if (!is_bnxt_fw_ok(bp))
+ bnxt_devlink_health_report(bp,
+ BNXT_FW_EXCEPTION_SP_EVENT);
+ }
+
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
@@ -9728,6 +10399,308 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+static int bnxt_fw_init_one_p1(struct bnxt *bp)
+{
+ int rc;
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ return rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ return rc;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_fw_set_time(bp);
+ return 0;
+}
+
+static int bnxt_fw_init_one_p2(struct bnxt *bp)
+{
+ int rc;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ return -ENODEV;
+
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_vnic_qcaps(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
+ bnxt_dcb_init(bp);
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ }
+}
+
+static void bnxt_set_dflt_rfs(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+
+ dev->hw_features &= ~NETIF_F_NTUPLE;
+ dev->features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ if (bnxt_rfs_supported(bp)) {
+ dev->hw_features |= NETIF_F_NTUPLE;
+ if (bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ dev->features |= NETIF_F_NTUPLE;
+ }
+ }
+}
+
+static void bnxt_fw_init_one_p3(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bnxt_set_dflt_rss_hash_type(bp);
+ bnxt_set_dflt_rfs(bp);
+
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+}
+
+static int bnxt_fw_init_one(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_fw_init_one_p1(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 1 failed\n");
+ return rc;
+ }
+ rc = bnxt_fw_init_one_p2(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ return rc;
+ }
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ if (rc)
+ return rc;
+ bnxt_fw_init_one_p3(bp);
+ return 0;
+}
+
+static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
+ u32 val = fw_health->fw_reset_seq_vals[reg_idx];
+ u32 reg_type, reg_off, delay_msecs;
+
+ delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_write_config_dword(bp->pdev, reg_off, val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ writel(reg_off & BNXT_GRC_BASE_MASK,
+ bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ writel(val, bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ writel(val, bp->bar1 + reg_off);
+ break;
+ }
+ if (delay_msecs) {
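+ /* A config read flushes the posted write before the delay. */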
+ pci_read_config_dword(bp->pdev, 0, &val);
+ msleep(delay_msecs);
+ }
+}
+
+static void bnxt_reset_all(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int i;
+
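+ /* Either replay the host register write sequence obtained from
+ * HWRM_ERROR_RECOVERY_QCFG, or ask the firmware to reset itself.
+ */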
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
+ bnxt_fw_reset_writel(bp, i);
+ } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
+ struct hwrm_fw_reset_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ }
+ bp->fw_reset_timestamp = jiffies;
+}
+
+static void bnxt_fw_reset_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
+ int rc;
+
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
+ return;
+ }
+
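+ /* Reset state machine: POLL_VF waits for VFs to unregister, RESET_FW
+ * triggers the reset, ENABLE_DEV re-enables the PCI device, POLL_FW
+ * polls until the firmware responds to HWRM_VER_GET, and OPENING
+ * reopens the NIC.
+ */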
+ switch (bp->fw_reset_state) {
+ case BNXT_FW_RESET_STATE_POLL_VF: {
+ int n = bnxt_get_registered_vfs(bp);
+
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
+ n, jiffies_to_msecs(jiffies -
+ bp->fw_reset_timestamp));
+ goto fw_reset_abort;
+ } else if (n > 0) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
+ n);
+ return;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ bp->fw_reset_timestamp = jiffies;
+ rtnl_lock();
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ rtnl_unlock();
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_RESET_FW: {
+ u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
+
+ bnxt_reset_all(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_ENABLE_DEV:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ bp->fw_health) {
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_INPROG_REG);
+ if (val)
+ netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
+ val);
+ }
+ clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ if (pci_enable_device(bp->pdev)) {
+ netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ goto fw_reset_abort;
+ }
+ pci_set_master(bp->pdev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_POLL_FW:
+ bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ if (rc) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ netdev_err(bp->dev, "Firmware reset aborted\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 5);
+ return;
+ }
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_OPENING:
+ while (!rtnl_trylock()) {
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ rc = bnxt_open(bp->dev);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_open_nic() failed\n");
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ }
+ bnxt_ulp_irq_restart(bp, rc);
+ rtnl_unlock();
+
+ bp->fw_reset_state = 0;
+ /* Make sure fw_reset_state is 0 before clearing the flag */
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ break;
+ }
+ return;
+
+fw_reset_abort:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ rtnl_lock();
+ dev_close(bp->dev);
+ rtnl_unlock();
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -9790,6 +10763,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
pci_enable_pcie_error_reporting(pdev);
INIT_WORK(&bp->sp_task, bnxt_sp_task);
+ INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
@@ -10333,7 +11307,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
free_netdev(dev);
}
-static int bnxt_probe_phy(struct bnxt *bp)
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
@@ -10344,8 +11318,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
- mutex_init(&bp->link_lock);
-
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -10359,6 +11331,9 @@ static int bnxt_probe_phy(struct bnxt *bp)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
+ if (!fw_dflt)
+ return 0;
+
/* initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -10379,7 +11354,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
else
link_info->req_flow_ctrl = link_info->force_pause_setting;
- return rc;
+ return 0;
}
static int bnxt_get_max_irq(struct pci_dev *pdev)
@@ -10683,32 +11658,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
- rc = bnxt_hwrm_ver_get(bp);
+ mutex_init(&bp->link_lock);
+
+ rc = bnxt_fw_init_one_p1(bp);
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
- rc = bnxt_alloc_kong_hwrm_resources(bp);
- if (rc)
- bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
- }
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
- rc = bnxt_alloc_hwrm_short_cmd_req(bp);
- if (rc)
- goto init_err_pci_clean;
- }
-
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
- rc = bnxt_hwrm_func_reset(bp);
+ rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
- bnxt_hwrm_fw_set_time(bp);
-
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -10746,41 +11708,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else if (BNXT_CHIP_P5(bp))
+ bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- goto init_err_pci_clean;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
- if (rc)
- goto init_err_pci_clean;
-
bp->ulp_probe = bnxt_ulp_probe;
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
- /* Get the MAX capabilities for this function */
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
-
- rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
- rc);
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10794,17 +11729,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
}
- bnxt_hwrm_func_qcfg(bp);
- bnxt_hwrm_vnic_qcaps(bp);
- bnxt_hwrm_port_led_qcaps(bp);
- bnxt_ethtool_init(bp);
- bnxt_dcb_init(bp);
/* MTU range: 60 - FW defined max */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = bp->max_mtu;
- rc = bnxt_probe_phy(bp);
+ rc = bnxt_probe_phy(bp, true);
if (rc)
goto init_err_pci_clean;
@@ -10818,24 +11748,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
- /* Default RSS hash cfg. */
- bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
- bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
- }
-
- if (bnxt_rfs_supported(bp)) {
- dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- dev->features |= NETIF_F_NTUPLE;
- }
- }
+ bnxt_fw_init_one_p3(bp);
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
@@ -10849,16 +11762,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- bnxt_get_wol_settings(bp);
- if (bp->flags & BNXT_FLAG_WOL_CAP)
- device_set_wakeup_enable(&pdev->dev, bp->wol);
- else
- device_set_wakeup_capable(&pdev->dev, false);
-
- bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
-
- bnxt_hwrm_coal_params_qcaps(bp);
-
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -10895,6 +11798,8 @@ init_err_pci_clean:
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->fw_health);
+ bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
@@ -10934,8 +11839,7 @@ shutdown_exit:
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
@@ -10951,8 +11855,7 @@ static int bnxt_suspend(struct device *device)
static int bnxt_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16694b704d15..333b0a85a7fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -113,6 +113,7 @@ struct tx_cmp {
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -263,14 +264,21 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
+ #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
struct rx_tpa_start_cmp {
__le32 rx_tpa_start_cmp_len_flags_type;
#define RX_TPA_START_CMP_TYPE (0x3f << 0)
#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
@@ -278,6 +286,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
@@ -291,6 +300,8 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -308,6 +319,14 @@ struct rx_tpa_start_cmp {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+#define TPA_START_AGG_ID_P5(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
__le32 rx_tpa_start_cmp_hdr_info;
@@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext {
(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -361,6 +395,8 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
#define RX_TPA_END_GRO_TS (0x1 << 31)
@@ -370,6 +406,18 @@ struct rx_tpa_end_cmp {
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+#define TPA_END_AGG_ID_P5(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
#define TPA_END_TPA_SEGS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
@@ -389,6 +437,10 @@ struct rx_tpa_end_cmp {
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16
+ #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24
__le32 rx_tpa_end_cmp_seg_len;
#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
@@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
#define RX_TPA_END_CMP_ERRORS (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
u32 rx_tpa_end_cmp_start_opaque;
};
@@ -405,6 +463,28 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
+
+#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -487,6 +567,9 @@ struct nqe_cn {
#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
+#define MAX_TPA_P5 256
+#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1)
+#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
#define MAX_RX_PAGES 1
@@ -562,6 +645,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define SHORT_HWRM_CMD_TIMEOUT 20
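+/* Both timeouts are in milliseconds; the short one is used while polling
+ * the FW during a reset.
+ */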
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
@@ -768,6 +852,15 @@ struct bnxt_tpa_info {
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ struct rx_agg_cmp *agg_arr;
+};
+
+#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
+
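+/* agg_id_tbl maps a hardware aggregation ID to the driver-side TPA index;
+ * agg_idx_bmap tracks which of the MAX_TPA_P5 indices are in use.
+ */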
+struct bnxt_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
};
struct bnxt_rx_ring_info {
@@ -797,6 +890,7 @@ struct bnxt_rx_ring_info {
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
struct bnxt_tpa_info *rx_tpa;
+ struct bnxt_tpa_idx_map *rx_tpa_idx_map;
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
@@ -978,6 +1072,7 @@ struct bnxt_pf_info {
u8 mac_addr[ETH_ALEN];
u32 first_vf_id;
u16 active_vfs;
+ u16 registered_vfs;
u16 max_vfs;
u32 max_encap_records;
u32 max_decap_records;
@@ -1137,6 +1232,9 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_BASE_MASK 0xfffff000
+#define BNXT_GRC_OFFSET_MASK 0x00000ffc
+
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1253,6 +1351,53 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info *tqm_mem[9];
};
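+
+/* FW health and error recovery parameters and state, valid when the FW
+ * advertises error recovery support (BNXT_FW_CAP_ERROR_RECOVERY).
+ */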
+struct bnxt_fw_health {
+ u32 flags;
+ u32 polling_dsecs;
+ u32 master_func_wait_dsecs;
+ u32 normal_func_wait_dsecs;
+ u32 post_reset_wait_dsecs;
+ u32 post_reset_max_wait_dsecs;
+ u32 regs[4];
+ u32 mapped_regs[4];
+#define BNXT_FW_HEALTH_REG 0
+#define BNXT_FW_HEARTBEAT_REG 1
+#define BNXT_FW_RESET_CNT_REG 2
+#define BNXT_FW_RESET_INPROG_REG 3
+ u32 fw_reset_inprog_reg_mask;
+ u32 last_fw_heartbeat;
+ u32 last_fw_reset_cnt;
+ u8 enabled:1;
+ u8 master:1;
+ u8 tmr_multiplier;
+ u8 tmr_counter;
+ u8 fw_reset_seq_cnt;
+ u32 fw_reset_seq_regs[16];
+ u32 fw_reset_seq_vals[16];
+ u32 fw_reset_seq_delay_msec[16];
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *fw_reset_reporter;
+ struct devlink_health_reporter *fw_fatal_reporter;
+};
+
+struct bnxt_fw_reporter_ctx {
+ unsigned long sp_event;
+};
+
+#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
+#define BNXT_FW_HEALTH_REG_TYPE_CFG 0
+#define BNXT_FW_HEALTH_REG_TYPE_GRC 1
+#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2
+#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3
+
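+/* A fw_health register descriptor encodes the address space in its low
+ * 2 bits and the register offset in the remaining bits.
+ */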
+#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
+#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
+
+#define BNXT_FW_HEALTH_WIN_BASE 0x3000
+#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+
+#define BNXT_FW_STATUS_HEALTHY 0x8000
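+/* Status values below BNXT_FW_STATUS_HEALTHY mean the FW is still
+ * initializing; values above it indicate a fatal error.
+ */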
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1282,7 +1427,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
@@ -1379,12 +1526,14 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- !(bp->flags & BNXT_FLAG_CHIP_P5) && \
- !is_kdump_kernel())
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (bp)->max_tpa_v2) && !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
- ((bp)->chip_num == CHIP_NUM_57500)
+ ((bp)->chip_num == CHIP_NUM_57508 || \
+ (bp)->chip_num == CHIP_NUM_57504 || \
+ (bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1414,6 +1563,8 @@ struct bnxt {
u16, void *, u8 *, dma_addr_t,
unsigned int);
+ u16 max_tpa_v2;
+ u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u16 rx_offset;
@@ -1469,6 +1620,10 @@ struct bnxt {
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
+#define BNXT_STATE_FW_RESET_DET 3
+#define BNXT_STATE_IN_FW_RESET 4
+#define BNXT_STATE_ABORT_ERR 5
+#define BNXT_STATE_FW_FATAL_COND 6
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1493,6 +1648,7 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
@@ -1525,6 +1681,7 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_valid;
@@ -1576,6 +1733,24 @@ struct bnxt {
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
#define BNXT_RING_COAL_NOW_SP_EVENT 17
+#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
+#define BNXT_FW_EXCEPTION_SP_EVENT 19
+
+ struct delayed_work fw_reset_task;
+ int fw_reset_state;
+#define BNXT_FW_RESET_STATE_POLL_VF 1
+#define BNXT_FW_RESET_STATE_RESET_FW 2
+#define BNXT_FW_RESET_STATE_ENABLE_DEV 3
+#define BNXT_FW_RESET_STATE_POLL_FW 4
+#define BNXT_FW_RESET_STATE_OPENING 5
+
+ u16 fw_reset_min_dsecs;
+#define BNXT_DFLT_FW_RST_MIN_DSECS 20
+ u16 fw_reset_max_dsecs;
+#define BNXT_DFLT_FW_RST_MAX_DSECS 60
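+/* dsecs are 100 msec units: the defaults above wait at least 2 seconds
+ * and at most 6 seconds for the FW to reset.
+ */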
+ unsigned long fw_reset_timestamp;
+
+ struct bnxt_fw_health *fw_health;
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1637,7 +1812,6 @@ struct bnxt {
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
- struct dentry *debugfs_dim;
struct device *hwmon_dev;
};
@@ -1782,6 +1956,7 @@ extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
@@ -1814,6 +1989,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_fw_exception(struct bnxt *bp);
+void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 07301cb87c03..fb6f30d0d1d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -377,8 +377,6 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
@@ -391,6 +389,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
struct hwrm_queue_dscp_qcaps_input req = {0};
int rc;
+ bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
@@ -433,8 +432,6 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
return rc;
@@ -722,6 +719,7 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
void bnxt_dcb_init(struct bnxt *bp)
{
+ bp->dcbx_cap = 0;
if (bp->hwrm_spec_code < 0x10501)
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 61393f351a77..156c2404854f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -61,45 +61,30 @@ static const struct file_operations debugfs_dim_fops = {
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
- struct dentry *dd)
+static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
+ struct dentry *dd)
{
static char qname[16];
snprintf(qname, sizeof(qname), "%d", ring_idx);
- return debugfs_create_file(qname, 0600, dd,
- dim, &debugfs_dim_fops);
+ debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
- struct dentry *pdevf;
+ struct dentry *dir;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
- if (bp->debugfs_pdev) {
- pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (!pdevf) {
- pr_err("failed to create debugfs entry %s/dim\n",
- pname);
- return;
- }
- bp->debugfs_dim = pdevf;
- /* create files for each rx ring */
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ dir = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (cpr && bp->bnapi[i]->rx_ring) {
- pdevf = debugfs_dim_ring_init(&cpr->dim, i,
- bp->debugfs_dim);
- if (!pdevf)
- pr_err("failed to create debugfs entry %s/dim/%d\n",
- pname, i);
- }
- }
- } else {
- pr_err("failed to create debugfs entry %s\n", pname);
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring)
+ debugfs_dim_ring_init(&cpr->dim, i, dir);
}
}
@@ -114,8 +99,6 @@ void bnxt_debug_dev_exit(struct bnxt *bp)
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
- if (!bnxt_debug_mnt)
- pr_err("failed to init bnxt_en debugfs\n");
}
void bnxt_debug_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index c05d663212b2..e664392dccc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -15,6 +15,192 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
+static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_health *health = bp->fw_health;
+ u32 val, health_status;
+ int rc;
+
+ if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return 0;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ health_status = val & 0xffff;
+
+ if (health_status == BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Healthy;");
+ if (rc)
+ return rc;
+ } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Not yet completed initialization;");
+ if (rc)
+ return rc;
+ } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Encountered fatal error and cannot recover;");
+ if (rc)
+ return rc;
+ }
+
+ if (val >> 16) {
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+ if (rc)
+ return rc;
+ }
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_reporter_diagnose,
+};
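+
+/* The "fw" reporter above is diagnose-only; the "fw_reset" and "fw_fatal"
+ * reporters below provide recover callbacks that trigger the actual reset
+ * handling.
+ */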
+
+static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ bnxt_fw_reset(bp);
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
+ .name = "fw_reset",
+ .recover = bnxt_fw_reset_recover,
+};
+
+static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
+ unsigned long event;
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ event = fw_reporter_ctx->sp_event;
+ if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
+ bnxt_fw_reset(bp);
+ else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
+ bnxt_fw_exception(bp);
+
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = bnxt_fw_fatal_recover,
+};
+
+static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
+ 0, false, bp);
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
+ }
+
+ health->fw_reset_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_reset_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_reset_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reset_reporter));
+ health->fw_reset_reporter = NULL;
+ }
+
+ health->fw_fatal_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_fatal_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_fatal_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_fatal_reporter));
+ health->fw_fatal_reporter = NULL;
+ }
+}
+
+static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ if (health->fw_reporter)
+ devlink_health_reporter_destroy(health->fw_reporter);
+
+ if (health->fw_reset_reporter)
+ devlink_health_reporter_destroy(health->fw_reset_reporter);
+
+ if (health->fw_fatal_reporter)
+ devlink_health_reporter_destroy(health->fw_fatal_reporter);
+}
+
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_fw_reporter_ctx fw_reporter_ctx;
+
+ if (!fw_health)
+ return;
+
+ fw_reporter_ctx.sp_event = event;
+ switch (event) {
+ case BNXT_FW_RESET_NOTIFY_SP_EVENT:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal async event received",
+ &fw_reporter_ctx);
+ return;
+ }
+ if (!fw_health->fw_reset_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_reset_reporter,
+ "FW non-fatal reset event received",
+ &fw_reporter_ctx);
+ return;
+
+ case BNXT_FW_EXCEPTION_SP_EVENT:
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal error reported",
+ &fw_reporter_ctx);
+ return;
+ }
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -109,13 +295,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
- return -EACCES;
- } else if (rc) {
- return -EIO;
- }
- return 0;
+ return rc;
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
@@ -251,6 +433,8 @@ int bnxt_dl_register(struct bnxt *bp)
devlink_params_publish(dl);
+ bnxt_dl_fw_reporters_create(bp);
+
return 0;
err_dl_port_unreg:
@@ -273,6 +457,7 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ bnxt_dl_fw_reporters_destroy(bp);
devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 5b6b2c7d97cf..b97e0baeb42d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -55,6 +55,7 @@ struct bnxt_dl_nvm_param {
u16 num_bits;
};
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8445a0cce849..235265eeec7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,44 @@ reset_coalesce:
return rc;
}
-#define BNXT_NUM_STATS 22
+static const char * const bnxt_ring_stats_str[] = {
+ "rx_ucast_packets",
+ "rx_mcast_packets",
+ "rx_bcast_packets",
+ "rx_discards",
+ "rx_drops",
+ "rx_ucast_bytes",
+ "rx_mcast_bytes",
+ "rx_bcast_bytes",
+ "tx_ucast_packets",
+ "tx_mcast_packets",
+ "tx_bcast_packets",
+ "tx_discards",
+ "tx_drops",
+ "tx_ucast_bytes",
+ "tx_mcast_bytes",
+ "tx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tpa_stats_str[] = {
+ "tpa_packets",
+ "tpa_bytes",
+ "tpa_events",
+ "tpa_aborts",
+};
+
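+/* TPA v2 stat names, used when the FW reports extended TPA stats
+ * (bp->max_tpa_v2).
+ */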
+static const char * const bnxt_ring_tpa2_stats_str[] = {
+ "rx_tpa_eligible_pkt",
+ "rx_tpa_eligible_bytes",
+ "rx_tpa_pkt",
+ "rx_tpa_bytes",
+ "rx_tpa_errors",
+};
+
+static const char * const bnxt_ring_sw_stats_str[] = {
+ "rx_l4_csum_errors",
+ "missed_irqs",
+};
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -207,6 +244,20 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
+
#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
__stringify(counter##_pri##n) }
@@ -352,6 +403,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};
static const struct {
@@ -417,9 +469,29 @@ static const struct {
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
+static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
+{
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ if (bp->max_tpa_v2)
+ return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ }
+ return 0;
+}
+
+static int bnxt_get_num_ring_stats(struct bnxt *bp)
+{
+ int num_stats;
+
+ num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
+ ARRAY_SIZE(bnxt_ring_sw_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
+ return num_stats * bp->cp_nr_rings;
+}
+
static int bnxt_get_num_stats(struct bnxt *bp)
{
- int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ int num_stats = bnxt_get_num_ring_stats(bp);
num_stats += BNXT_NUM_SW_FUNC_STATS;
@@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+ u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
if (!bp->bnapi) {
- j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
goto skip_ring_stats;
}
@@ -551,56 +624,39 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- u32 i;
+ static const char * const *str;
+ u32 i, j, num_str;
switch (stringset) {
- /* The number of strings must match BNXT_NUM_STATS defined above. */
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- sprintf(buf, "[%d]: rx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_events", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_aborts", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_l4_csum_errors", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: missed_irqs", i);
- buf += ETH_GSTRING_LEN;
+ num_str = ARRAY_SIZE(bnxt_ring_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ if (!BNXT_SUPPORTS_TPA(bp))
+ goto skip_tpa_stats;
+
+ if (bp->max_tpa_v2) {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ str = bnxt_ring_tpa2_stats_str;
+ } else {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ str = bnxt_ring_tpa_stats_str;
+ }
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i, str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+skip_tpa_stats:
+ num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -1643,6 +1699,11 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static void bnxt_print_admin_err(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
+}
+
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
@@ -1682,13 +1743,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -1738,13 +1794,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to reset the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2039,13 +2090,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
- if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (hwrm_err) {
- rc = -EOPNOTSUPP;
- }
+ if (hwrm_err == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2584,8 +2630,6 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -3306,6 +3350,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
+ bp->num_tests = 0;
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -3315,7 +3360,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (rc)
goto ethtool_init_exit;
- test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ test_info = bp->test_info;
+ if (!test_info)
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
goto ethtool_init_exit;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 12bbb2a207d0..2cdef753a1bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,8 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2019 Broadcom Limited
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2019 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,15 +40,15 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY
/* tlv (size:64b/8B) */
@@ -267,7 +268,6 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -313,6 +313,7 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -410,8 +411,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 69
-#define HWRM_VERSION_STR "1.10.0.69"
+#define HWRM_VERSION_RSVD 89
+#define HWRM_VERSION_STR "1.10.0.89"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2916,7 +2921,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2624b/328B) */
+/* rx_port_stats_ext (size:3648b/456B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2959,6 +2964,22 @@ struct rx_port_stats_ext {
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output {
u8 valid;
};
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
@@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output {
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
- u8 unused_1;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input {
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
@@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
- u8 unused_0[7];
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
u8 valid;
};
@@ -6567,6 +6613,31 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
+/* ctx_hw_stats_ext (size:1344b/168B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+};
+
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
@@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input {
__le32 update_period_ms;
u8 stat_ctx_flags;
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 stats_dma_length;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -7204,7 +7276,9 @@ struct coredump_segment_record {
u8 version_hi;
u8 version_low;
u8 seg_flags;
- u8 unused_0[7];
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[6];
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input {
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2b90a2bb1a1d..f6f3454d6059 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -25,7 +25,6 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
@@ -40,23 +39,10 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -133,7 +119,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
vf->func_qcfg_flags = le16_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -164,9 +150,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -EIO;
- return 0;
+ return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -486,10 +470,43 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+/* Caller holds bp->hwrm_cmd_lock mutex lock */
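+/* Re-apply the PF-administered VF settings (MAC, default VLAN, bandwidth,
+ * trust) so they survive a firmware reset.
+ */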
+static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_vf_info *vf;
+
+ vf = &bp->pf.vf[vf_id];
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+ }
+ if (vf->vlan) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = cpu_to_le16(vf->vlan);
+ }
+ if (vf->max_tx_rate) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(vf->max_tx_rate);
+#ifdef HAVE_IFLA_TX_RATE
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(vf->min_tx_rate);
+#endif
+ }
+ if (vf->flags & BNXT_VF_TRUST)
+ req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
-static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
struct hwrm_func_vf_resource_cfg_input req = {0};
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -561,13 +578,14 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ if (reset)
+ __bnxt_set_vf_params(bp, i);
+
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
break;
- }
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
@@ -664,8 +682,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -ENOMEM;
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -679,14 +695,40 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
return rc;
}
-static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
if (BNXT_NEW_RM(bp))
- return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
}
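+
+/* Reserve FW resources for *num_vfs; shared by the SRIOV enable path and
+ * by callers restoring VFs after a FW reset (reset == true also re-applies
+ * the saved VF parameters).
+ */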
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ int rc;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ return rc;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_func_cfg(bp, *num_vfs, reset);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ return rc;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
+ rc);
+ *num_vfs = rc;
+ }
+
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+ return 0;
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
@@ -752,25 +794,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out1;
- /* Reserve resources for VFs */
- rc = bnxt_func_cfg(bp, *num_vfs);
- if (rc != *num_vfs) {
- if (rc <= 0) {
- netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
- *num_vfs = 0;
- goto err_out2;
- }
- netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
- *num_vfs = rc;
- }
-
- /* Register buffers for VFs */
- rc = bnxt_hwrm_func_buf_rgtr(bp);
+ rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
if (rc)
goto err_out2;
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
-
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -837,6 +864,11 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
rtnl_unlock();
return 0;
}
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ rtnl_unlock();
+ return 0;
+ }
bp->sriov_cfg = true;
rtnl_unlock();
@@ -870,7 +902,6 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_fwd_resp_input req = {0};
- struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -885,22 +916,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req.encap_resp, encap_resp, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
- goto fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -909,7 +927,6 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_reject_fwd_resp_input req = {0};
- struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -920,22 +937,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
- goto fwd_err_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_err_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -944,7 +948,6 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_exec_fwd_resp_input req = {0};
- struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -955,22 +958,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
- goto exec_fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-exec_fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -1190,6 +1180,13 @@ mac_done:
}
#else
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ if (*num_vfs)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
void bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 2eed9eda1195..629641bf6fc5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -36,6 +36,7 @@ int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index dd621f6bd127..c8062d020d1e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -319,8 +319,6 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -515,11 +513,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
- rc = -ENOSPC;
- else if (rc)
- rc = -EIO;
return rc;
}
@@ -591,8 +584,6 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -609,8 +600,6 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -660,8 +649,6 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -678,8 +665,6 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -1457,8 +1442,6 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
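The deleted rc remapping is not lost: with this series the common HWRM completion path translates firmware error codes into errnos before callers see them. A sketch of what such a translation helper looks like (enum names are from bnxt_hsi.h; the exact errno chosen per code may differ from the old per-caller mapping, e.g. -ENOMEM rather than -ENOSPC for allocation failures):

static int bnxt_hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOMEM;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}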
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fc77caf0a076..b2c160947fc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -226,6 +226,9 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct input *req;
int rc;
+ if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ return -EBUSY;
+
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 57dc3cbff36e..155599dcee76 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
+ int i;
cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
+ atomic_set(&cp->csk_tbl[i].ref_count, 0);
+
port_id = prandom_u32();
port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
@@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
+ atomic_set(&cdev->ref_count, 0);
cp = cdev->cnic_priv;
cp->dev = cdev;
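The explicit atomic_set() calls are deliberate even though kvcalloc() returns zeroed memory: an atomic_t should be initialized through the API rather than by assuming an all-zero bit pattern is valid. The two standard idioms, as a generic sketch:

#include <linux/atomic.h>

struct tracked {
	atomic_t ref_count;
};

static atomic_t global_refs = ATOMIC_INIT(0);	/* static storage */

static void tracked_init(struct tracked *t)
{
	atomic_set(&t->ref_count, 0);		/* dynamically allocated storage */
}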
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b22196880d6d..12cb77ef1081 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2516,19 +2516,14 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
struct netdev_queue *txq;
- struct sk_buff *skb;
- struct enet_cb *cb;
int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++) {
- cb = priv->tx_cbs + i;
- skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
- if (skb)
- dev_kfree_skb(skb);
- }
+ for (i = 0; i < priv->num_tx_bds; i++)
+ dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
+ priv->tx_cbs + i));
for (i = 0; i < priv->hw_params->tx_queues; i++) {
txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
@@ -3438,7 +3433,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
unsigned int i;
int err = -EIO;
const char *phy_mode_str;
@@ -3478,8 +3472,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
macaddr = pd->mac_address;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
goto err;
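devm_platform_ioremap_resource() is shorthand for exactly the two calls it replaces here; its definition is essentially:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

devm_ioremap_resource() already handles a NULL resource by returning an ERR_PTR, so the combined helper needs no extra checking at the call site.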
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4c404d2213f9..77f3511b97de 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
@@ -18098,8 +18097,7 @@ unlock:
static int tg3_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
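This works because PCI drvdata is just the generic device drvdata; pci_get_drvdata() is a thin wrapper, so PM callbacks that only receive a struct device can skip the to_pci_dev() round-trip:

static inline void *pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}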
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7767ae6fa1fd..e338272931d1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
head_unmap->nvecs++;
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 99f49d059414..f96a42af1014 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab805579f96..7f3b2e3b0868 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
int i, frags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
spin_lock(&lio->glist_lock[q_idx]);
@@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
@@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
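skb_frag_dma_map() hides the page/offset plumbing that was open-coded above; a sketch of the helper, essentially as defined in include/linux/skbuff.h once skb_frag_off() exists:

static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}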
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index db0b90555acb..370d76822ee0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
int i, frags;
@@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
dma_unmap_single(&oct->pci_dev->dev,
@@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 021d99cd1665..614d07be7181 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -260,9 +260,7 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
dev_info(&oct->pci_dev->dev,
"got a request for FLR from VF that owns DPI ring %u\n",
mbox->q_no);
- pcie_capability_set_word(
- oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
- PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
break;
case OCTEON_PF_CHANGED_VF_MACADDR:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 192bc92da881..4ab57d33a87e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
goto doorbell;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
- frag->page_offset, size,
+ skb_frag_off(frag), size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 89db739b7819..6dabbf1502c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag;
+ skb_frag_t *rx_frag;
int nr_frags;
int offset = 0;
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
- rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
skb_frag_size_set(rx_frag, len);
skb->len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index d692251ee252..ae6a47dd7dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3531,7 +3531,6 @@ int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size = 0;
- struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, 0400, 0 },
@@ -3642,8 +3641,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
- &flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
debugfs_create_bool("use_backdoor", 0600,
adap->debugfs_root, &adap->use_bd);
debugfs_create_bool("trace_rss", 0600,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4311ad9c84b2..71854a19cebe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6269,10 +6269,7 @@ static int __init cxgb4_init_module(void)
{
int ret;
- /* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index eaf1fb74689c..01c65d13fc0e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
- atomic_set(&s->smtab[i].refcnt, 0);
+ s->smtab[i].refcnt = 0;
}
return s;
}
@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (atomic_read(&e->refcnt) == 0) {
+ if (e->refcnt == 0) {
if (!first_free)
first_free = e;
} else {
@@ -97,11 +97,9 @@ found_reuse:
static void t4_smte_free(struct smt_entry *e)
{
- spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->refcnt == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
- spin_unlock_bh(&e->lock);
}
/**
@@ -111,8 +109,10 @@ static void t4_smte_free(struct smt_entry *e)
*/
void cxgb4_smt_release(struct smt_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ spin_lock_bh(&e->lock);
+ if ((--e->refcnt) == 0)
t4_smte_free(e);
+ spin_unlock_bh(&e->lock);
}
EXPORT_SYMBOL(cxgb4_smt_release);
@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
- if (!atomic_read(&e->refcnt)) {
- atomic_set(&e->refcnt, 1);
+ if (!e->refcnt) {
+ e->refcnt = 1;
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
- atomic_inc(&e->refcnt);
+ ++e->refcnt;
}
spin_unlock(&e->lock);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index d6c2cc271398..1268d6e93a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -59,7 +59,7 @@ struct smt_entry {
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
- atomic_t refcnt;
+ int refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
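With refcnt demoted to a plain int, every access must now happen under e->lock; cxgb4_smt_release() above is the put side, and t4_smt_alloc_switching() open-codes the get side. A hypothetical helper, only to spell out the pattern:

static void t4_smte_get(struct smt_entry *e)
{
	spin_lock_bh(&e->lock);
	++e->refcnt;	/* e->lock protects all refcnt accesses */
	spin_unlock_bh(&e->lock);
}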
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 6d4cf3d0b2f0..f6fc0875d5b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2478,11 +2478,10 @@ static int setup_debugfs(struct adapter *adapter)
* Debugfs support is best effort.
*/
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- (void)debugfs_create_file(debugfs_files[i].name,
- debugfs_files[i].mode,
- adapter->debugfs_root,
- (void *)adapter,
- debugfs_files[i].fops);
+ debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root, (void *)adapter,
+ debugfs_files[i].fops);
return 0;
}
@@ -3257,11 +3256,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (IS_ERR_OR_NULL(adapter->debugfs_root))
- dev_warn(&pdev->dev, "could not create debugfs"
- " directory");
- else
- setup_debugfs(adapter);
+ setup_debugfs(adapter);
}
/*
@@ -3486,13 +3481,11 @@ static int __init cxgb4vf_module_init(void)
return -EINVAL;
}
- /* Debugfs support is optional, just warn if this fails */
+ /* Debugfs support is optional, debugfs will warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ if (ret < 0)
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b3e7fafee3df..c9aebcde403a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1844,16 +1844,12 @@ cleanup_module(void)
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- struct net_local *lp;
- struct resource *mem_res;
void __iomem *virt_addr;
int err;
if (!dev)
return -ENOMEM;
- lp = netdev_priv(dev);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq <= 0) {
dev_warn(&dev->dev, "interrupt resource missing\n");
@@ -1861,8 +1857,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
goto free;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virt_addr = devm_ioremap_resource(&pdev->dev, mem_res);
+ virt_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virt_addr)) {
err = PTR_ERR(virt_addr);
goto free;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9003eb6716cd..e736ce2c58ca 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
buflen = skb_headlen(skb);
} else {
skb_frag = skb_si->frags + frag;
- buffer = page_address(skb_frag_page(skb_frag)) +
- skb_frag->page_offset;
- buflen = skb_frag->size;
+ buffer = skb_frag_address(skb_frag);
+ buflen = skb_frag_size(skb_frag);
}
if (frag == last_frag) {
@@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no IRQ\n");
+ if (irq <= 0)
return irq ? irq : -ENODEV;
- }
port->irq = irq;
/* Clock the port */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 386bdc1378d1..cce90b5925d9 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0) {
- dev_err(db->dev, "interrupt resource unavailable: %d\n",
- ndev->irq);
ret = ndev->irq;
goto out;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f287b5da5546..cf3e6f2892ff 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -192,7 +192,6 @@ struct be_eq_obj {
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
@@ -589,6 +588,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
+ bool aic_enabled;
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 492f8769ac12..5bb5abf99588 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -329,8 +329,8 @@ static int be_get_coalesce(struct net_device *netdev,
et->tx_coalesce_usecs_high = aic->max_eqd;
et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = aic->enable;
- et->use_adaptive_tx_coalesce = aic->enable;
+ et->use_adaptive_rx_coalesce = adapter->aic_enabled;
+ et->use_adaptive_tx_coalesce = adapter->aic_enabled;
return 0;
}
@@ -346,8 +346,9 @@ static int be_set_coalesce(struct net_device *netdev,
struct be_eq_obj *eqo;
int i;
+ adapter->aic_enabled = et->use_adaptive_rx_coalesce;
+
for_all_evt_queues(adapter, eqo, i) {
- aic->enable = et->use_adaptive_rx_coalesce;
aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d8e40ac66d2..39eb7d525043 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -2147,7 +2147,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
int i;
aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
+ if (!adapter->aic_enabled) {
if (aic->jiffies)
aic->jiffies = 0;
eqd = aic->et_eqd;
@@ -2204,7 +2204,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
int eqd;
u32 mult_enc;
- if (!aic->enable)
+ if (!adapter->aic_enabled)
return 0;
if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
- skb_shinfo(skb)->frags[0].page_offset =
- page_info->page_offset + hdr_len;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+ page_info->page_offset + hdr_len);
skb_frag_size_set(&skb_shinfo(skb)->frags[0],
curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
/* First frag or Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
@@ -2959,6 +2959,8 @@ static int be_evt_queues_create(struct be_adapter *adapter)
max(adapter->cfg_num_rx_irqs,
adapter->cfg_num_tx_irqs));
+ adapter->aic_enabled = true;
+
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2966,7 +2968,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eqo->adapter = adapter;
eqo->idx = i;
aic->max_eqd = BE_MAX_EQD;
- aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 027225e1ade2..815fb62c4b02 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -576,7 +576,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
struct nps_enet_priv *priv;
s32 err = 0;
const char *mac_addr;
- struct resource *res_regs;
if (!dev->of_node)
return -ENODEV;
@@ -595,8 +594,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* FIXME :: no multicast support yet */
ndev->flags &= ~IFF_MULTICAST;
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs_base = devm_ioremap_resource(dev, res_regs);
+ priv->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs_base)) {
err = PTR_ERR(priv->regs_base);
goto out_netdev;
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index a9b105803fb7..73e4f2648e49 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -32,6 +32,7 @@ config FTGMAC100
depends on ARM || NDS32 || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select MDIO_ASPEED if MACH_ASPEED_G6
---help---
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 030fed65393e..9b7af94a40bb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
/* Map it */
map = skb_frag_dma_map(priv->dev, frag, 0, len,
@@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (priv->is_aspeed) {
- /* This driver supports the old MDIO interface */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ /* The AST2600 has a separate MDIO controller */
+
+ /* For the AST2400 and AST2500 this driver only supports the
+ * old MDIO interface
+ */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
@@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
+ of_device_is_compatible(np, "aspeed,ast2500-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
- } else {
+ } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ goto err_setup_mdio;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ } else if (np && !of_get_child_by_name(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that describe a
+ * MAC with an embedded MDIO controller but have no "mdio"
+ * child node. Automatically scan the MDIO bus for available
+ * PHYs.
+ */
priv->use_ncsi = false;
err = ftgmac100_setup_mdio(netdev);
if (err)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f38c3fa7d705..b4b82b9c5cd6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -485,7 +485,7 @@ static struct dpaa_bp *dpaa_bpid2pool(int bpid)
static bool dpaa_bpid2pool_use(int bpid)
{
if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
return true;
}
@@ -496,7 +496,7 @@ static bool dpaa_bpid2pool_use(int bpid)
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
}
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +584,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
if (!bp)
return;
- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
return;
if (bp->free_buf_cb)
@@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* populate the rest of SGT entries */
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
addr = skb_frag_dma_map(dev, frag, 0,
frag_len, dma_dir);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index af320f83c742..f7e59e8db075 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -32,6 +32,7 @@
#define __DPAA_H
#include <linux/netdevice.h>
+#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
@@ -99,7 +100,7 @@ struct dpaa_bp {
int (*seed_cb)(struct dpaa_bp *);
/* bpool can be emptied before freeing by this cb */
void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
- atomic_t refs;
+ refcount_t refs;
};
struct dpaa_rx_errors {
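refcount_t is the saturating, overflow-checked counterpart to atomic_t for reference counting, so the conversion above is mechanical. A minimal usage sketch, assuming a count established at 1 with refcount_set():

#include <linux/refcount.h>

struct pool {
	refcount_t refs;
};

static void pool_get(struct pool *p)
{
	refcount_inc(&p->refs);		/* WARNs and saturates on overflow */
}

static bool pool_put(struct pool *p)
{
	/* true when the last reference was dropped */
	return refcount_dec_and_test(&p->refs);
}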
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a027f4a9d0cc..a9afe46b837f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -164,70 +164,30 @@ static const struct file_operations dpaa2_dbg_ch_ops = {
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
- if (!dpaa2_dbg_root)
- return;
+ struct dentry *dir;
/* Create a directory for the interface */
- priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
- dpaa2_dbg_root);
- if (!priv->dbg.dir) {
- netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
- return;
- }
+ dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
/* per-cpu stats file */
- priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_cpu_ops);
- if (!priv->dbg.cpu_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_cpu_stats;
- }
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
/* per-fq stats file */
- priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_fq_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_fq_stats;
- }
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
/* per-channel stats file */
- priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_ch_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_ch_stats;
- }
-
- return;
-
-err_ch_stats:
- debugfs_remove(priv->dbg.fq_stats);
-err_fq_stats:
- debugfs_remove(priv->dbg.cpu_stats);
-err_cpu_stats:
- debugfs_remove(priv->dbg.dir);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
{
- debugfs_remove(priv->dbg.fq_stats);
- debugfs_remove(priv->dbg.ch_stats);
- debugfs_remove(priv->dbg.cpu_stats);
- debugfs_remove(priv->dbg.dir);
+ debugfs_remove_recursive(priv->dbg.dir);
}
void dpaa2_eth_dbg_init(void)
{
dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
- if (!dpaa2_dbg_root) {
- pr_err("DPAA2-ETH: debugfs create failed\n");
- return;
- }
-
pr_debug("DPAA2-ETH: debugfs created\n");
}
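The pattern in all of these debugfs cleanups is the same: debugfs_create_*() return values need no checking, because a failed call yields an ERR_PTR that later debugfs calls accept as a parent and quietly turn into no-ops. A minimal sketch of the convention (all names hypothetical):

static struct dentry *ex_dir;
static u32 ex_counter;

static void ex_dbg_init(void)
{
	/* no error checks: a failed dir is a valid no-op parent */
	ex_dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("counter", 0444, ex_dir, &ex_counter);
}

static void ex_dbg_exit(void)
{
	debugfs_remove_recursive(ex_dir);
}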
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
index 4f63de997a26..15598b28f03b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -11,9 +11,6 @@ struct dpaa2_eth_priv;
struct dpaa2_debugfs {
struct dentry *dir;
- struct dentry *fq_stats;
- struct dentry *ch_stats;
- struct dentry *cpu_stats;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0acb11557ed1..5402867272be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1208,9 +1208,37 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+{
+ struct dpni_taildrop td = {0};
+ int i, err;
+
+ if (priv->rx_td_enabled == enable)
+ return;
+
+ td.enable = enable;
+ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type != DPAA2_RX_FQ)
+ continue;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
+ priv->fq[i].flowid, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop() failed\n");
+ break;
+ }
+ }
+
+ priv->rx_td_enabled = enable;
+}
+
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
+ bool tx_pause;
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1220,11 +1248,18 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
return err;
}
+ /* If Tx pause frame settings have changed, we need to update
+ * Rx FQ taildrop configuration as well. We configure taildrop
+ * only when pause frame generation is disabled.
+ */
+ tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+ dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+
/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
- return 0;
+ goto out;
- priv->link_state = state;
if (state.up) {
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
@@ -1236,6 +1271,9 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
netdev_info(priv->net_dev, "Link Event: state %s\n",
state.up ? "up" : "down");
+out:
+ priv->link_state = state;
+
return 0;
}
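The tx_pause expression encodes the standard PAUSE/ASYM_PAUSE semantics: PAUSE alone means symmetric pause, ASYM_PAUSE alone means Tx-only, and both together means Rx-only. The same logic as an illustrative helper, with the truth table spelled out:

/*
 * PAUSE ASYM | rx tx
 *   0    0   |  0  0	no pause
 *   0    1   |  0  1	Tx pause only
 *   1    0   |  1  1	symmetric pause
 *   1    1   |  1  0	Rx pause only
 */
static bool dpni_tx_pause_enabled(u64 options)
{
	return !!(options & DPNI_LINK_OPT_PAUSE) ^
	       !!(options & DPNI_LINK_OPT_ASYM_PAUSE);
}

Taildrop is enabled only when Tx pause is off, since the two are competing mechanisms for dealing with Rx congestion.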
@@ -2443,6 +2481,33 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq;
}
+static int set_pause(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_link_cfg link_cfg = {0};
+ int err;
+
+ /* Get the default link options so we don't override other flags */
+ err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_get_link_cfg() failed\n");
+ return err;
+ }
+
+ /* By default, enable both Rx and Tx pause frames */
+ link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
+
+ priv->link_state.options = link_cfg.options;
+
+ return 0;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2498,6 +2563,13 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
set_enqueue_mode(priv);
+ /* Enable pause frame support */
+ if (dpaa2_eth_has_pause_support(priv)) {
+ err = set_pause(priv);
+ if (err)
+ goto close;
+ }
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
@@ -2529,7 +2601,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
struct dpni_queue_id qid;
- struct dpni_taildrop td;
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
@@ -2554,15 +2625,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return err;
}
- td.enable = 1;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
- err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
- DPNI_QUEUE_RX, 0, fq->flowid, &td);
- if (err) {
- dev_err(dev, "dpni_set_threshold() failed\n");
- return err;
- }
-
/* xdp_rxq setup */
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9af18c24221f..8a0e65b3267f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -392,6 +392,7 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
+ u8 rx_td_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
@@ -476,6 +477,12 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PAUSE_VER_MAJOR 7
+#define DPNI_PAUSE_VER_MINOR 13
+#define dpaa2_eth_has_pause_support(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
+ DPNI_PAUSE_VER_MINOR) >= 0)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 7b182f4b263c..93076fe01b45 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -78,71 +78,67 @@ static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
- struct dpni_link_state state = {0};
- int err = 0;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
- if (err) {
- netdev_err(net_dev, "ERROR %d getting link state\n", err);
- goto out;
- }
-
- /* At the moment, we have no way of interrogating the DPMAC
- * from the DPNI side - and for that matter there may exist
- * no DPMAC at all. So for now we just don't report anything
- * beyond the DPNI attributes.
- */
- if (state.options & DPNI_LINK_OPT_AUTONEG)
- link_settings->base.autoneg = AUTONEG_ENABLE;
- if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.autoneg = AUTONEG_DISABLE;
+ if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
- link_settings->base.speed = state.rate;
+ link_settings->base.speed = priv->link_state.rate;
-out:
- return err;
+ return 0;
}
-#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
-#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
-static int
-dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
- const struct ethtool_link_ksettings *link_settings)
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
{
- struct dpni_link_cfg cfg = {0};
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err = 0;
+ u64 link_options = priv->link_state.options;
- /* If using an older MC version, the DPNI must be down
- * in order to be able to change link settings. Taking steps to let
- * the user know that.
- */
- if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
- DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
- if (netif_running(net_dev)) {
- netdev_info(net_dev, "Interface must be brought down first.\n");
- return -EACCES;
- }
+ pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
+ pause->tx_pause = pause->rx_pause ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+ pause->autoneg = AUTONEG_DISABLE;
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ if (!dpaa2_eth_has_pause_support(priv)) {
+ netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+ DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+ return -EOPNOTSUPP;
}
- cfg.rate = link_settings->base.speed;
- if (link_settings->base.autoneg == AUTONEG_ENABLE)
- cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ cfg.rate = priv->link_state.rate;
+ cfg.options = priv->link_state.options;
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
- if (link_settings->base.duplex == DUPLEX_HALF)
- cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ if (!!pause->rx_pause ^ !!pause->tx_pause)
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+ if (cfg.options == priv->link_state.options)
+ return 0;
err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
- if (err)
- /* ethtool will be loud enough if we return an error; no point
- * in putting our own error message on the console by default
- */
- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
- return err;
+ priv->link_state.options = cfg.options;
+
+ return 0;
}
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
@@ -721,7 +717,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
- .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
.get_strings = dpaa2_eth_get_strings,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 7b44d7d9b19a..d9b6918807af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -84,6 +84,7 @@
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -284,7 +285,7 @@ struct dpni_rsp_get_statistics {
__le64 counter[DPNI_STATISTICS_CNT];
};
-struct dpni_cmd_set_link_cfg {
+struct dpni_cmd_link_cfg {
/* cmd word 0 */
__le64 pad0;
/* cmd word 1 */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 220dfc806a24..05e30893dee6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -838,13 +838,13 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
const struct dpni_link_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_link_cfg *cmd_params;
+ struct dpni_cmd_link_cfg *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
cmd_flags,
token);
- cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
cmd_params->rate = cpu_to_le32(cfg->rate);
cmd_params->options = cpu_to_le64(cfg->options);
@@ -853,6 +853,42 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_get_link_cfg() - Return the link configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->options = le64_to_cpu(rsp_params->options);
+
+ return err;
+}
+
+/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index a521242e2353..3e8fc6c7a8da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -485,6 +485,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
u16 token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 04a59db03f2b..c219587bd334 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -20,6 +20,15 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_MDIO
+ tristate "ENETC MDIO driver"
+ depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports the NXP ENETC Central MDIO controller as a PCIe
+ physical function (PF) device.
+
+ If compiled as module (M), the module name is fsl-enetc-mdio.
+
config FSL_ENETC_PTP_CLOCK
tristate "ENETC PTP clock driver"
depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 7139e414dccf..d200c27c3bf6 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,19 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \
- enetc_mdio.o
+fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
-fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
-fsl-enetc-vf-objs := enetc_vf.o
-else
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
- enetc_ethtool.o
-fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
-endif
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
-fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 223709443ea4..b6ff89307409 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
union enetc_tx_bd *txbd;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 77b9cd10ba2b..149883c8f0b8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -6,18 +6,20 @@
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_pf.h"
+#include "enetc_mdio.h"
-struct enetc_mdio_regs {
- u32 mdio_cfg; /* MDIO configuration and status */
- u32 mdio_ctl; /* MDIO control */
- u32 mdio_data; /* MDIO data */
- u32 mdio_addr; /* MDIO address */
-};
+#define ENETC_MDIO_REG_OFFSET 0x1c00
+#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
+#define ENETC_MDIO_CTL 0x4 /* MDIO control */
+#define ENETC_MDIO_DATA 0x8 /* MDIO data */
+#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv)
+#define enetc_mdio_rd(hw, off) \
+ enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
+#define enetc_mdio_wr(hw, off, val) \
+ enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDC_DIV 258
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
@@ -33,18 +35,18 @@ struct enetc_mdio_regs {
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs)
+static int enetc_mdio_wait_complete(struct enetc_hw *hw)
{
u32 val;
- return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val,
+ return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
!(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
}
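The extra single-argument enetc_mdio_rd_reg() form exists because readx_poll_timeout(op, addr, ...) requires a one-parameter accessor as its first argument. The expansion chain, spelled out:

/*
 * enetc_mdio_rd_reg(MDIO_CFG)
 *   => enetc_mdio_rd(hw, MDIO_CFG)	('hw' is picked up from the caller's scope)
 *   => enetc_port_rd(hw, ENETC_MDIO_CFG + ENETC_MDIO_REG_OFFSET)
 */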
-static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
@@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* write the value */
- enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value));
+ enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
return 0;
}
-static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
@@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* initiate the read */
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_rd_reg(&regs->mdio_data) & 0xffff;
+ value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
return value;
}
@@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int enetc_mdio_probe(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_regs __iomem *regs;
+ struct enetc_mdio_priv *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
- int ret;
+ int err;
- bus = mdiobus_alloc_size(sizeof(regs));
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf)
bus->read = enetc_mdio_read;
bus->write = enetc_mdio_write;
bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- /* store the enetc mdio base address for this bus */
- regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET;
- bus->priv = regs;
-
np = of_get_child_by_name(dev->of_node, "mdio");
if (!np) {
dev_err(dev, "MDIO node missing\n");
- ret = -EINVAL;
- goto err_registration;
+ return -EINVAL;
}
- ret = of_mdiobus_register(bus, np);
- if (ret) {
+ err = of_mdiobus_register(bus, np);
+ if (err) {
of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return err;
}
of_node_put(np);
pf->mdio = bus;
return 0;
-
-err_registration:
- mdiobus_free(bus);
-
- return ret;
}
void enetc_mdio_remove(struct enetc_pf *pf)
{
- if (pf->mdio) {
+ if (pf->mdio)
mdiobus_unregister(pf->mdio);
- mdiobus_free(pf->mdio);
- }
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
new file mode 100644
index 000000000000..60c9a3889824
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#include <linux/phy.h>
+#include "enetc_pf.h"
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+};
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 000000000000..fbd41ce01f06
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/of_mdio.h>
+#include "enetc_mdio.h"
+
+#define ENETC_MDIO_DEV_ID 0xee01
+#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = &pdev->dev;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int err;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = ENETC_MDIO_BUS_NAME;
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "device enable failed\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_region failed\n");
+ goto err_pci_mem_reg;
+ }
+
+ hw->port = pci_iomap(pdev, 0, 0);
+ if (!hw->port) {
+ err = -ENXIO;
+ dev_err(dev, "iomap failed\n");
+ goto err_ioremap;
+ }
+
+ err = of_mdiobus_register(bus, dev->of_node);
+ if (err)
+ goto err_mdiobus_reg;
+
+ pci_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_mdiobus_reg:
+ iounmap(mdio_priv->hw->port);
+err_ioremap:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+ struct mii_bus *bus = pci_get_drvdata(pdev);
+ struct enetc_mdio_priv *mdio_priv;
+
+ mdiobus_unregister(bus);
+ mdio_priv = bus->priv;
+ iounmap(mdio_priv->hw->port);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pci_mdio_id_table,
+ .probe = enetc_pci_mdio_probe,
+ .remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 258b3cb38a6f..7d6513ff8507 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct device_node *np = priv->dev->of_node;
+ struct device_node *mdio_np;
int err;
if (!np) {
@@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
priv->phy_node = of_node_get(np);
}
- if (!of_phy_is_fixed_link(np)) {
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
err = enetc_mdio_probe(pf);
if (err) {
of_node_put(priv->phy_node);
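The hunk above keys MDIO probing off the presence of an "mdio" child node instead of the fixed-link check. A small sketch of that lookup with a hypothetical foo_ helper; of_get_child_by_name() takes a reference that must be dropped even when only existence matters:
#include <linux/of.h>

static bool foo_has_mdio_child(struct device_node *np)
{
	struct device_node *mdio_np = of_get_child_by_name(np, "mdio");

	if (!mdio_np)
		return false;

	of_node_put(mdio_np);	/* only existence matters here */
	return true;
}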
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e5610a4da539..d4d4c72adf49 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -208,8 +208,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_ST_C45 (0)
#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_READ_C45 (3 << 28)
#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE (0)
#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
@@ -365,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
+ frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
/* Handle the last BD specially */
if (frag == nr_frags - 1) {
@@ -387,7 +390,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ bufaddr = skb_frag_address(this_frag);
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -1767,7 +1770,8 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret = 0, frame_start, frame_addr, frame_op;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1775,9 +1779,37 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ frame_op = FEC_MMFR_OP_READ_C45;
+
+ } else {
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
@@ -1804,7 +1836,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret;
+ int ret, frame_start, frame_addr;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1814,9 +1847,33 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a write op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -1828,6 +1885,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
ret = -ETIMEDOUT;
}
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -3338,7 +3396,6 @@ fec_probe(struct platform_device *pdev)
struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
- struct resource *r;
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
@@ -3378,8 +3435,7 @@ fec_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ fep->hwp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
goto failed_ioremap;
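The Clause 45 support added above splits one regnum into two MMFR frames; a sketch of the field composition using the defines from this patch (foo_* names and the example values are invented; Clause 22 instead sends a single ST=01 frame with the register number in the RA field):
static u32 foo_c45_addr_frame(int phy_id, int regnum)
{
	int devad = (regnum >> 16) & 0x1f;	/* MMD number, e.g. 1 */
	u16 reg = regnum & 0xffff;		/* register, e.g. 0x8000 */

	/* frame 1: ST=00, OP=00 (address write), register in data field */
	return FEC_MMFR_ST_C45 | FEC_MMFR_OP_ADDR_WRITE |
	       FEC_MMFR_PA(phy_id) | FEC_MMFR_RA(devad) |
	       FEC_MMFR_TA | reg;
}

static u32 foo_c45_read_frame(int phy_id, int regnum)
{
	int devad = (regnum >> 16) & 0x1f;

	/* frame 2: ST=00, OP=11 (C45 read); data returns in FEC_MII_DATA */
	return FEC_MMFR_ST_C45 | FEC_MMFR_OP_READ_C45 |
	       FEC_MMFR_PA(phy_id) | FEC_MMFR_RA(devad) | FEC_MMFR_TA;
}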
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5fad73b2e123..3981c06f082f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
for (i = 0; i < nr_frags; i++, frag++) {
- if (!IS_ALIGNED(frag->page_offset, 4)) {
+ if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
is_aligned = 0;
break;
}
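The conversion from direct skb_frag_struct field access to the skb_frag_t accessors recurs throughout this patch (fec, fs_enet, hix5hd2, hns, hns3). A minimal sketch with a hypothetical foo_ helper:
#include <linux/skbuff.h>

/* sum of fragment sizes, touching frags only through accessors */
static unsigned int foo_frag_bytes(const struct sk_buff *skb)
{
	unsigned int i, total = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		total += skb_frag_size(frag);	/* was frag->size */
		/* skb_frag_off(frag)     - was frag->page_offset */
		/* skb_frag_address(frag) - was page_address(frag->page.p)
		 *                           + frag->page_offset */
	}

	return total;
}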
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7ea19e173339..412c0340fed9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2005,8 +2005,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
- if (rx_queue->skb)
- dev_kfree_skb(rx_queue->skb);
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 689f18e3100f..90ab7ade44c4 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq <= 0) {
- dev_err(dev, "No irq resource\n");
ret = -ENODEV;
goto out_disconnect_phy;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 349970557c52..95a6b0926170 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ int len = skb_frag_size(frag);
addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
ret = dma_mapping_error(priv->dev, addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2235dd55fab2..a48396dd4ebb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso(
int frag_num;
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
size = skb_headlen(skb);
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int buf_num;
int seg_num;
dma_addr_t dma;
@@ -1182,6 +1182,8 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
+ phy_attached_info(phy_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 75329ab775a6..f8a87f8ca983 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive an NCSI error */
};
/* below are per-VF mac-vlan subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 908d4f45c06a..528f6243cdc6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -46,7 +46,7 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+ struct hnae3_ae_dev *ae_dev)
{
int inited = 0;
@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- int ret = 0;
if (!client)
return -ENODEV;
@@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reason, move on to the next available port
*/
- ret = hnae3_init_client_instance(client, ae_dev);
+ int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -164,7 +163,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_algo)
return;
@@ -258,7 +257,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_dev)
return -ENODEV;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 48c7b70fc2c4..c4b7bf851a28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -58,10 +58,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
@@ -85,13 +85,18 @@ struct hnae3_queue {
void __iomem *io_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
- int tqp_index; /* index in a handle */
- u32 buf_size; /* size for hnae_desc->addr, preset by AE */
- u16 tx_desc_num;/* total number of tx desc */
- u16 rx_desc_num;/* total number of rx desc */
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
};
-/*hnae3 loop mode*/
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
enum hnae3_loop {
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
@@ -141,6 +146,12 @@ enum hnae3_reset_notify_type {
HNAE3_RESTORE_CLIENT,
};
+enum hnae3_hw_error_type {
+ HNAE3_PPU_POISON_ERROR,
+ HNAE3_CMDQ_ECC_ERROR,
+ HNAE3_IMP_RD_POISON_ERROR,
+};
+
enum hnae3_reset_type {
HNAE3_VF_RESET,
HNAE3_VF_FUNC_RESET,
@@ -179,6 +190,15 @@ struct hnae3_vector_info {
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -196,7 +216,8 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
- enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
+ void (*process_hw_error)(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -289,6 +310,8 @@ struct hnae3_ae_dev {
* Remove multicast address from mac table
* update_stats()
* Update old network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
* get_ethtool_stats()
* Get ethtool network device statistics
* get_strings()
@@ -417,8 +440,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
- void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -605,7 +628,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
- u64 flags; /* Indicate the capabilities for this handle*/
+ u64 flags; /* Indicate the capabilities for this handle */
union {
struct net_device *netdev; /* first member */
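The new HNAE3_FW_VERSION_BYTE*_MASK/SHIFT pairs exist so the firmware version can be printed per byte; a worked example with an invented value:
/* Worked example: fw_version = 0x01096022
 *   byte3 = (0x01096022 & GENMASK(31, 24)) >> 24 = 0x01 ->  1
 *   byte2 = (0x01096022 & GENMASK(23, 16)) >> 16 = 0x09 ->  9
 *   byte1 = (0x01096022 & GENMASK(15, 8))  >>  8 = 0x60 -> 96
 *   byte0 = (0x01096022 & GENMASK(7, 0))   >>  0 = 0x22 -> 34
 * so the "%lu.%lu.%lu.%lu" format used later in this patch prints
 * "1.9.96.34" where the old code printed the raw "0x01096022".
 */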
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index a4b937286f55..5cf4c1ecc5b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -8,6 +8,7 @@
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -38,7 +39,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
if (queue_num >= h->kinfo.num_tqps) {
dev_err(&h->pdev->dev,
- "Queue number(%u) is out of range(%u)\n", queue_num,
+ "Queue number(%u) is out of range(0-%u)\n", queue_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -176,7 +177,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
}
if (q_num >= h->kinfo.num_tqps) {
- dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
+ dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -187,14 +188,14 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
tx_index = (cnt == 1) ? value : tx_index;
if (tx_index >= ring->desc_num) {
- dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
+ dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
ring->desc_num - 1);
return -EINVAL;
}
tx_desc = &ring->desc[tx_index];
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
- dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
+ dev_info(dev, "(TX)addr: 0x%llx\n", tx_desc->addr);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
@@ -218,6 +219,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
+ dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
@@ -237,16 +239,16 @@ static void hns3_dbg_help(struct hnae3_handle *h)
char printf_buf[HNS3_DBG_BUF_LEN];
dev_info(&h->pdev->dev, "available commands\n");
- dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
- dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+ dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
if (!hns3_is_phys_func(h->pdev))
return;
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
- dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+ dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
dev_info(&h->pdev->dev, "dump tm\n");
dev_info(&h->pdev->dev, "dump qos pause cfg\n");
dev_info(&h->pdev->dev, "dump qos pri map\n");
@@ -258,20 +260,20 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+ strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
HNS3_DBG_BUF_LEN - 1);
strncat(printf_buf + strlen(printf_buf),
- " [igu egu <prt_id>] [rpu <tc_queue_num>]",
+ " [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
+ strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
HNS3_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
+ strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
}
@@ -322,6 +324,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
+ if (count > HNS3_DBG_WRITE_LEN)
+ return -ENOSPC;
+
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
@@ -372,20 +377,11 @@ static const struct file_operations hns3_dbg_cmd_fops = {
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
- struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
- if (!handle->hnae3_dbgfs)
- return;
- pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
- &hns3_dbg_cmd_fops);
- if (!pfile) {
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
- dev_warn(&handle->pdev->dev, "create file for %s fail\n",
- name);
- }
+ debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
@@ -397,10 +393,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!hns3_dbgfs_root) {
- pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
- return;
- }
}
void hns3_dbg_unregister_debugfs(void)
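The dropped return-value checks above follow the debugfs convention that debugfs_create_dir()/debugfs_create_file() need no error handling: on failure (or with debugfs compiled out) they return error pointers that later debugfs calls accept as no-ops. A minimal sketch with hypothetical foo_ names:
#include <linux/debugfs.h>

static struct dentry *foo_dbg_root;
static const struct file_operations foo_dbg_fops;	/* ops elided */

static void foo_dbg_init(const char *name)
{
	/* no return-value checks by design */
	foo_dbg_root = debugfs_create_dir(name, NULL);
	debugfs_create_file("cmd", 0600, foo_dbg_root, NULL,
			    &foo_dbg_fops);
}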
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 310afa708831..9f3f8e3cb2c2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -28,6 +28,12 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
@@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -220,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
*/
-
- /* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
@@ -459,6 +468,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
return 0;
}
@@ -519,6 +531,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return 0;
+ netif_dbg(h, drv, netdev, "net stop\n");
+
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
@@ -956,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
-static int hns3_fill_desc_vtags(struct sk_buff *skb,
- struct hns3_enet_ring *tx_ring,
- u32 *inner_vlan_flag,
- u32 *out_vlan_flag,
- u16 *inner_vtag,
- u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
{
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
/* Due to a HW limitation, if port based insert VLAN is enabled, only one
* VLAN header is allowed in skb, otherwise it will cause a RAS error.
@@ -976,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
return -EINVAL;
if (skb->protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->tqp->handle->kinfo.netdev->features &
- NETIF_F_HW_VLAN_CTAG_TX)) {
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
/* When HW VLAN acceleration is turned off, and the stack
* sets the protocol to 802.1q, the driver just needs to
* set the protocol to the encapsulated ethertype.
@@ -987,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag;
-
- vlan_tag = skb_vlan_tag_get(skb);
- vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
/* Based on hw strategy, use out_vtag in two layer tag case,
* and use inner_vtag in one tag case.
*/
- if (skb->protocol == htons(ETH_P_8021Q)) {
- if (handle->port_base_vlan_state ==
- HNAE3_PORT_BASE_VLAN_DISABLE){
- hns3_set_field(*out_vlan_flag,
- HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
- } else {
- hns3_set_field(*inner_vlan_flag,
- HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else {
- hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *vhdr;
- int rc;
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
- rc = skb_cow_head(skb, 0);
- if (unlikely(rc < 0))
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
- << HNS3_TX_VLAN_PRIO_SHIFT);
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
}
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
skb->protocol = vlan_get_protocol(skb);
return 0;
}
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc)
+{
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_vlan_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ inner_vtag = skb_vlan_tag_get(skb);
+ inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ out_vtag = skb_vlan_tag_get(skb);
+ out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l4_proto_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l2l3l4_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &paylen, &mss,
+ &type_cs_vlan_tso);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_tso_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.paylen = cpu_to_le32(paylen);
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+ return 0;
+}
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int size, int frag_end,
enum hns_desc_type type)
@@ -1033,65 +1108,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
dma_addr_t dma;
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
- u32 ol_type_vlan_len_msec = 0;
- u32 type_cs_vlan_tso = 0;
- u32 paylen = skb->len;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
- u16 mss = 0;
int ret;
- ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
- &ol_type_vlan_len_msec,
- &inner_vtag, &out_vtag);
+ ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret))
return ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- skb_reset_mac_len(skb);
-
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_tso(skb, &paylen, &mss,
- &type_cs_vlan_tso);
- if (unlikely(ret))
- return ret;
- }
-
- /* Set txbd */
- desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le32(paylen);
- desc->tx.mss = cpu_to_le16(mss);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
- frag = (struct skb_frag_struct *)priv;
+ frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
@@ -1147,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
{
- int size = skb_headlen(skb);
- int i, bd_num;
+ unsigned int bd_num;
+ int i;
/* if the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE))
return skb_shinfo(skb)->nr_frags + 1;
- bd_num = hns3_tx_bd_count(size);
+ bd_num = hns3_tx_bd_count(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- int frag_bd_num;
-
- size = skb_frag_size(frag);
- frag_bd_num = hns3_tx_bd_count(size);
-
- if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
- return -ENOMEM;
-
- bd_num += frag_bd_num;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bd_num += hns3_tx_bd_count(skb_frag_size(frag));
}
return bd_num;
@@ -1189,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb)
{
- int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
@@ -1219,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- int bd_num;
+ unsigned int bd_num;
bd_num = hns3_nic_bd_num(skb);
- if (bd_num < 0)
- return bd_num;
-
- if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
+ !hns3_skb_need_linearized(skb))
goto out;
- bd_num = hns3_tx_bd_count(skb->len);
- if (unlikely(ring_space(ring) < bd_num))
- return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
@@ -1241,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+ bd_num = hns3_nic_bd_num(new_skb);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
+ (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ return -ENOMEM;
+
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
u64_stats_update_end(&ring->syncp);
@@ -1290,7 +1321,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int next_to_use_head;
int buf_num;
int seg_num;
@@ -1314,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&ring->syncp);
}
- if (net_ratelimit())
- netdev_err(netdev, "xmit error: %d!\n", buf_num);
-
+ hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
}
@@ -1482,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
tx_drop += ring->stats.sw_err_cnt;
+ tx_drop += ring->stats.tx_vlan_err;
+ tx_drop += ring->stats.tx_l4_proto_err;
+ tx_drop += ring->stats.tx_l2l3l4_err;
+ tx_drop += ring->stats.tx_tso_err;
tx_errors += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.tx_vlan_err;
+ tx_errors += ring->stats.tx_l4_proto_err;
+ tx_errors += ring->stats.tx_l2l3l4_err;
+ tx_errors += ring->stats.tx_tso_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1550,6 +1587,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
h = hns3_get_handle(netdev);
kinfo = &h->kinfo;
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1593,6 +1632,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
+ vf, vlan, qos, vlan_proto);
+
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
qos, vlan_proto);
@@ -1611,6 +1654,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1680,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
/* When the mac has received many pause frames continuously, it's unable
* to send packets, which may cause a tx timeout
*/
- if (h->ae_algo->ops->update_stats &&
- h->ae_algo->ops->get_mac_pause_stats) {
- u64 tx_pause_cnt, rx_pause_cnt;
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
- h->ae_algo->ops->update_stats(h, &ndev->stats);
- h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
- &rx_pause_cnt);
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
- tx_pause_cnt, rx_pause_cnt);
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
hw_head = readl_relaxed(tx_ring->tqp->io_base +
@@ -1963,7 +2006,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
ops = ae_dev->ops;
/* request the reset */
- if (ops->reset_event) {
+ if (ops->reset_event && ops->get_reset_level) {
if (ae_dev->hw_err_reset_req) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
@@ -2067,7 +2110,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae3_page_order(ring);
+ unsigned int order = hns3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -2078,7 +2121,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae3_page_size(ring);
+ cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -2357,8 +2400,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "hnae reserve buffer map failed.\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx buffer failed: %d\n",
+ ret);
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2381,7 +2425,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
int size = le16_to_cpu(desc->rx.size);
- u32 truesize = hnae3_buf_size(ring);
+ u32 truesize = hns3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2396,7 +2440,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
@@ -2443,9 +2487,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
&iph->daddr, 0);
} else {
- netdev_err(skb->dev,
- "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
- be16_to_cpu(type), depth);
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
return -EFAULT;
}
@@ -2587,7 +2631,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb = ring->skb;
if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
@@ -2662,8 +2706,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!new_skb)) {
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "alloc rx skb frag fail\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx fraglist skb fail\n");
return -ENXIO;
}
ring->frag_num = 0;
@@ -2678,7 +2722,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
if (ring->tail_skb) {
- head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->truesize += hns3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
@@ -2895,24 +2939,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
- int num;
+ int recv_pkts = 0;
+ int recv_bds = 0;
+ int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
- recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
- if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
- clean_count = 0;
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
}
@@ -2926,7 +2968,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
goto out;
} else if (unlikely(err)) { /* Do jump the err */
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
continue;
@@ -2934,7 +2976,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -2943,8 +2985,8 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
out:
/* Make sure all data has been written before submit */
- if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
+ if (unused_count > 0)
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
return recv_pkts;
}
@@ -3574,7 +3616,7 @@ out:
return ret;
}
-static void hns3_fini_ring(struct hns3_enet_ring *ring)
+void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
devm_kfree(ring_to_dev(ring), ring->desc_cb);
@@ -4165,8 +4207,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only supports setting and querying one coal
- * configuation for now, so save the vector 0' coal
- * configuation here in order to restore it.
+ * configuration for now, so save the vector 0's coal
+ * configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
@@ -4378,6 +4420,9 @@ int hns3_set_channels(struct net_device *netdev,
u16 org_tqp_num;
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (ch->rx_count || ch->tx_count)
return -EINVAL;
@@ -4392,6 +4437,10 @@ int hns3_set_channels(struct net_device *netdev,
if (kinfo->rss_size == new_tqp_num)
return 0;
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
@@ -4421,12 +4470,36 @@ int hns3_set_channels(struct net_device *netdev,
return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
+static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+ { .type = HNAE3_CMDQ_ECC_ERROR,
+ .msg = "IMP CMDQ error" },
+ { .type = HNAE3_IMP_RD_POISON_ERROR,
+ .msg = "IMP RD poison" },
+};
+
+static void hns3_process_hw_error(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
+ if (hns3_hw_err[i].type == type) {
+ dev_err(&handle->pdev->dev, "Detected %s!\n",
+ hns3_hw_err[i].msg);
+ break;
+ }
+ }
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
.setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
+ .process_hw_error = hns3_process_hw_error,
};
/* hns3_init_module - Driver registration routine
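A worked example of the vtag composition in hns3_fill_skb_desc() above (values invented):
/* skb_vlan_tag_get() = VID 100 (0x0064), skb->priority = 5:
 *   (5 << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK = 5 << 13 = 0xA000
 *   vtag = 0x0064 | 0xA000 = 0xA064
 * i.e. the 3-bit PCP lands in TCI bits 15:13 above the 12-bit VLAN ID,
 * and hns3_handle_vtags() only decides whether that TCI is written to
 * the inner or the outer vlan_tag field of the Tx BD.
 */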
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 848b866761df..2110fa3b4479 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -75,7 +75,7 @@ enum hns3_nic_state {
#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
-#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
@@ -195,7 +195,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_PER_FRAG 8
+#define HNS3_MAX_BD_NUM_NORMAL 8
+#define HNS3_MAX_BD_NUM_TSO 63
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
#define HNS3_VECTOR_GL0_OFFSET 0x100
@@ -301,7 +302,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
- /* priv data for the desc, e.g. skb when use with ip stack*/
+ /* priv data for the desc, e.g. skb when used with the ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
@@ -324,11 +325,11 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_MAC_PAUSE,
HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
- /* reserved for 0xA~0xB*/
+ /* reserved for 0xA~0xB */
HNS3_L3_TYPE_CNM = 0xc,
- /* reserved for 0xD~0xE*/
+ /* reserved for 0xD~0xE */
HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -353,7 +354,7 @@ enum hns3_pkt_ol3type {
HNS3_OL3_TYPE_IPV4_OPT = 4,
HNS3_OL3_TYPE_IPV6_EXT,
- /* reserved for 0x6~0xE*/
+ /* reserved for 0x6~0xE */
HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -377,6 +378,10 @@ struct ring_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
};
struct {
u64 rx_pkts;
@@ -547,6 +552,11 @@ union l4_hdr_info {
unsigned char *hdr;
};
+struct hns3_hw_error_info {
+ enum hnae3_hw_error_type type;
+ const char *msg;
+};
+
static inline int ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
@@ -608,9 +618,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
@@ -633,6 +652,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
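A worked example of the new hns3_page_order()/hns3_page_size() helpers (4 KiB pages assumed):
/* buf_size = 2048: 2048 > PAGE_SIZE / 2 is false -> order 0, 4 KiB
 * buf_size = 4096: 4096 > PAGE_SIZE / 2 is true  -> order 1, 8 KiB
 * Either way the allocation holds two RX buffers, which is what lets
 * hns3_nic_reuse_page() flip page_offset between the halves; with
 * PAGE_SIZE >= 8192 the #if removes the test and order is always 0.
 */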
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 5bff98a9b0dc..c52eccc1621d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -55,7 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 3
+#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -85,6 +89,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -92,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break;
}
- if (ret)
+ if (ret || h->pdev->revision >= 0x21)
return ret;
if (en) {
@@ -139,7 +144,10 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
static void hns3_lp_setup_skb(struct sk_buff *skb)
{
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
unsigned char *packet;
struct ethhdr *ethh;
unsigned int i;
@@ -155,7 +163,9 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* before the packet reaches mac or serdes, which will defeat
* the purpose of mac or serdes selftest.
*/
- ethh->h_dest[5] += 0x1f;
+ handle = hns3_get_handle(ndev);
+ if (handle->pdev->revision == 0x20)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
@@ -311,6 +321,8 @@ static void hns3_self_test(struct net_device *ndev,
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
+ netif_dbg(h, drv, ndev, "self test start\n");
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -324,6 +336,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -374,6 +390,8 @@ static void hns3_self_test(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
+
+ netif_dbg(h, drv, ndev, "self test end\n");
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -527,6 +545,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
@@ -545,8 +564,18 @@ static void hns3_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->bus_info));
drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
}
static u32 hns3_get_link(struct net_device *netdev)
@@ -593,6 +622,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
if (h->ae_algo->ops->set_pauseparam)
return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
param->rx_pause,
@@ -612,7 +645,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
- /* 2.get link mode*/
+ /* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
@@ -681,7 +714,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int hns3_check_ksettings_param(struct net_device *netdev,
+static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -726,12 +759,17 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- int ret = 0;
+ int ret;
/* The chip doesn't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
@@ -843,8 +881,8 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 tx_desc_num, u32 rx_desc_num)
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -857,21 +895,29 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
rx_desc_num;
}
-
- return hns3_init_all_ring(priv);
}
-static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- int queue_num = h->kinfo.num_tqps;
- int ret;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++)
+ memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ sizeof(struct hns3_enet_ring));
+
+ return tmp_rings;
+}
+
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
if (hns3_nic_resetting(ndev))
return -EBUSY;
@@ -887,6 +933,25 @@ static int hns3_set_ringparam(struct net_device *ndev,
return -EINVAL;
}
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param);
+ if (ret)
+ return ret;
+
/* Hardware requires the number of descriptors to be a multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
@@ -896,6 +961,13 @@ static int hns3_set_ringparam(struct net_device *ndev,
old_rx_desc_num == new_rx_desc_num)
return 0;
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+ "backup ring param failed by allocating memory fail\n");
+ return -ENOMEM;
+ }
+
netdev_info(ndev,
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
old_tx_desc_num, old_rx_desc_num,
@@ -904,22 +976,24 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- return ret;
-
- ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
- new_rx_desc_num);
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
- if (ret) {
- netdev_err(ndev,
- "Revert to old bd num fail, ret=%d.\n", ret);
- return ret;
- }
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
}
+ kfree(tmp_rings);
+
if (if_running)
ret = ndev->netdev_ops->ndo_open(ndev);
@@ -973,6 +1047,9 @@ static int hns3_nway_reset(struct net_device *netdev)
return -EINVAL;
}
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
if (phy)
return genphy_restart_aneg(phy);
@@ -1297,6 +1374,9 @@ static int hns3_set_fecparam(struct net_device *netdev,
if (!ops->set_fec)
return -EOPNOTSUPP;
fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
return ops->set_fec(handle, fec_mode);
}
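
The reworked hns3_set_ringparam() above snapshots every ring with hns3_backup_ringparam() before reconfiguring, so a failed hns3_init_all_ring() restores the old state byte-for-byte instead of re-running a resize that could itself fail. A minimal user-space sketch of that snapshot-and-rollback pattern (struct ring, resize_rings() and failing_reinit() are illustrative stand-ins, not driver symbols):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring { unsigned int desc_num; };  /* stand-in for hns3_enet_ring */

/* Apply a new depth to all rings; roll back from a snapshot on failure. */
static int resize_rings(struct ring *rings, int n, unsigned int depth,
                        int (*reinit)(struct ring *, int))
{
    struct ring *backup;
    int i, ret;

    backup = calloc(n, sizeof(*backup));
    if (!backup)
        return -1;                       /* -ENOMEM in the driver */
    memcpy(backup, rings, n * sizeof(*rings));

    for (i = 0; i < n; i++)
        rings[i].desc_num = depth;

    ret = reinit(rings, n);
    if (ret)                             /* restore the snapshot */
        memcpy(rings, backup, n * sizeof(*rings));

    free(backup);
    return ret;
}

static int failing_reinit(struct ring *r, int n) { (void)r; (void)n; return -1; }

int main(void)
{
    struct ring rings[2] = { { 1024 }, { 1024 } };

    resize_rings(rings, 2, 2048, failing_reinit);
    printf("desc_num after rollback: %u\n", rings[0].desc_num); /* 1024 */
    return 0;
}

Because the rollback is a plain memcpy() it cannot fail, which is why the patch could drop the old revert path that re-ran the resize and needed its own error handling.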
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 22f6acd45d9a..ecf58cfd253d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
dma_addr_t dma = ring->desc_dma_addr;
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
+ u32 reg_val;
if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -383,6 +386,23 @@ err_csq:
return ret;
}
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+ struct hclge_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ req->compat = cpu_to_le32(compat);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
int hclge_cmd_init(struct hclge_dev *hdev)
{
u32 version;
@@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ /* ask the firmware to enable some features, the driver can work without
+ * them.
+ */
+ ret = hclge_firmware_compat_config(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
return 0;
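
The dotted firmware version printed above is just the 32-bit version word split into four byte-wide fields via hnae3_get_field(). A compilable sketch, assuming each field is exactly one byte (the real HNAE3_FW_VERSION_BYTE*_MASK/SHIFT values live in hnae3.h and are not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: version word = byte3.byte2.byte1.byte0, one byte each. */
#define FW_BYTE(ver, n) (((ver) >> ((n) * 8)) & 0xffu)

int main(void)
{
    uint32_t version = 0x01080a02;       /* example register value */

    printf("The firmware version is %u.%u.%u.%u\n",
           (unsigned)FW_BYTE(version, 3), (unsigned)FW_BYTE(version, 2),
           (unsigned)FW_BYTE(version, 1), (unsigned)FW_BYTE(version, 0));
    return 0;                            /* prints 1.8.10.2 */
}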
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96840d8f3e24..4821fe08b5e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,6 +86,8 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
@@ -221,6 +223,9 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
@@ -257,6 +262,7 @@ enum hclge_opcode_type {
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
+ HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -586,6 +592,12 @@ struct hclge_config_mac_mode_cmd {
u8 rsv[20];
};
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
#define HCLGE_CFG_SPEED_S 0
#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
@@ -762,6 +774,31 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
+#define HCLGE_SWITCH_ALW_LPBK_B 1U
+#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
+#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U
+#define HCLGE_SWITCH_NO_MASK 0x0
+#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE
+#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD
+#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB
+#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7
+
+struct hclge_mac_vlan_switch_cmd {
+ u8 roce_sel;
+ u8 rsv1[3];
+ __le32 func_id;
+ u8 switch_param;
+ u8 rsv2[3];
+ u8 param_mask;
+ u8 rsv3[11];
+};
+
+enum hclge_mac_vlan_cfg_sel {
+ HCLGE_MAC_VLAN_NIC_SEL = 0,
+ HCLGE_MAC_VLAN_ROCE_SEL,
+};
+
#define HCLGE_ACCEPT_TAG1_B 0
#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
@@ -827,7 +864,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_add[6];
+ u8 mac_addr[6];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -877,6 +914,13 @@ struct hclge_reset_cmd {
u8 rsv[22];
};
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
@@ -906,8 +950,11 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGE_NIC_CMQ_EN_B 16
-#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_NIC_SW_RST_RDY_B 16
+#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
+
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
@@ -1009,6 +1056,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
+#define HCLGE_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+struct hclge_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
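
Two hunks in this file pair up: HCLGE_NIC_CMQ_EN_B is repurposed as HCLGE_NIC_SW_RST_RDY_B, so hclge_cmd_config_regs() switches from a blind store to a read-modify-write that programs the CSQ depth without clearing the reset-handshake bit. A host-side model of that masking (the register layout here is assumed for illustration, not read from hardware):

#include <stdint.h>
#include <stdio.h>

#define SW_RST_RDY (1u << 16)            /* handshake bit, must survive */

static uint32_t csq_depth_reg;           /* stand-in for the MMIO register */

static void set_csq_depth(uint32_t depth_field)
{
    uint32_t val = csq_depth_reg;        /* read */
    val &= SW_RST_RDY;                   /* keep only the handshake bit */
    val |= depth_field;                  /* merge in the new depth */
    csq_depth_reg = val;                 /* write back */
}

int main(void)
{
    csq_depth_reg = SW_RST_RDY | 0x7f;   /* stale depth, handshake set */
    set_csq_depth(1024 >> 3);            /* desc_num >> DESC_NUM_S */
    printf("reg = 0x%08x\n", (unsigned)csq_depth_reg); /* 0x00010080 */
    return 0;
}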
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index bac4ce13f6ae..816f92084138 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -198,9 +198,32 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev)
return 0;
}
+static int hclge_notify_down_uninit(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+}
+
+static int hclge_notify_init_up(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
bool map_changed = false;
u8 num_tc = 0;
@@ -215,11 +238,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
return ret;
if (map_changed) {
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
+ netif_dbg(h, drv, netdev, "set ets\n");
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uninit(hdev);
if (ret)
return ret;
}
@@ -239,11 +260,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
}
@@ -254,10 +271,8 @@ err_out:
if (!map_changed)
return ret;
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
@@ -300,6 +315,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
@@ -325,6 +341,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
hclge_tm_pfc_info_update(hdev);
return hclge_pause_setup_hw(hdev, false);
@@ -345,8 +365,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
/* No support for LLD_MANAGED modes or CEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
@@ -372,11 +395,7 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (ret)
return -EINVAL;
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uninit(hdev);
if (ret)
return ret;
@@ -398,17 +417,11 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
else
hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return hclge_notify_init_up(hdev);
err_out:
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
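
The DCB hunks fold the repeated client notifications into two helpers that bracket reconfiguration, with a best-effort recovery on the error path: if applying the new configuration fails after the client was torn down, the code still tries to bring the client back up while preserving the original error. A sketch of that bracket pattern with stand-in callbacks (none of these names are driver symbols):

#include <stdio.h>

/* Stand-ins for the client notifications; 0 means success. */
static int notify_down_uninit(void) { return 0; }
static int notify_init_up(void)     { return 0; }
static int apply_config(void)       { return -1; /* simulate a failure */ }

static int setup_tc(void)
{
    int ret = notify_down_uninit();      /* HNAE3_DOWN + HNAE3_UNINIT */
    if (ret)
        return ret;

    ret = apply_config();
    if (ret)
        goto err_out;

    return notify_init_up();             /* HNAE3_INIT + HNAE3_UP */

err_out:
    /* best effort: bring the client back up, keep the original error */
    notify_init_up();
    return ret;
}

int main(void)
{
    printf("setup_tc -> %d\n", setup_tc()); /* -1: config failed, client up */
    return 0;
}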
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ab625c757a95..1c6b501fb7ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -4,32 +4,92 @@
#include <linux/device.h>
#include "hclge_debugfs.h"
-#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
+static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .reg_type = "bios common",
+ .dfx_msg = &hclge_dbg_bios_common_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+ .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+ .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+ .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+ .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+ .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+ { .reg_type = "igu egu",
+ .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+ .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+ .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+ .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+ { .reg_type = "ncsi",
+ .dfx_msg = &hclge_dbg_ncsi_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+ .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+ .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+ { .reg_type = "rtc",
+ .dfx_msg = &hclge_dbg_rtc_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+ .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RTC_REG } },
+ { .reg_type = "ppp",
+ .dfx_msg = &hclge_dbg_ppp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+ .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_PPP_REG } },
+ { .reg_type = "rcb",
+ .dfx_msg = &hclge_dbg_rcb_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+ .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RCB_REG } },
+ { .reg_type = "tqp",
+ .dfx_msg = &hclge_dbg_tqp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+ .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
- struct hclge_desc desc[4];
- int ret;
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int entries_per_desc;
+ int index;
+ int ret;
- ret = hclge_cmd_send(&hdev->hw, desc, 4);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
dev_err(&hdev->pdev->dev,
- "get dfx bdnum fail, status is %d.\n", ret);
+ "get dfx bdnum fail, ret = %d\n", ret);
return ret;
}
- return (int)desc[offset / 6].data[offset % 6];
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ index = offset % entries_per_desc;
+ return (int)desc[offset / entries_per_desc].data[index];
}
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -50,35 +110,40 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "read reg cmd send fail, status is %d.\n", ret);
- return ret;
- }
-
+ "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
return ret;
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_dfx_message *dfx_message,
- const char *cmd_buf, int msg_num,
- int offset, enum hclge_opcode_type cmd)
+ struct hclge_dbg_reg_type_info *reg_info,
+ const char *cmd_buf)
{
-#define BD_DATA_NUM 6
+#define IDX_OFFSET 1
+ const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
+ struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
+ int entries_per_desc;
int bd_num, buf_len;
+ int index = 0;
+ int min_num;
int ret, i;
- int index;
- int max;
- ret = kstrtouint(cmd_buf, 10, &index);
- index = (ret != 0) ? 0 : index;
+ if (*s) {
+ ret = kstrtouint(s, 0, &index);
+ index = (ret != 0) ? 0 : index;
+ }
- bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
- if (bd_num <= 0)
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
+ if (bd_num <= 0) {
+ dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
+ reg_msg->offset, bd_num);
return;
+ }
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
@@ -88,22 +153,23 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ if (ret) {
kfree(desc_src);
return;
}
- max = (bd_num * BD_DATA_NUM) <= msg_num ?
- (bd_num * BD_DATA_NUM) : msg_num;
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
desc = desc_src;
- for (i = 0; i < max; i++) {
- ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
+ for (i = 0; i < min_num; i++) {
+ if (i > 0 && (i % entries_per_desc) == 0)
+ desc++;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % BD_DATA_NUM]);
+ desc->data[i % entries_per_desc]);
dfx_message++;
}
@@ -213,95 +279,25 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- int msg_num;
-
- if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
- msg_num = sizeof(hclge_dbg_bios_common_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
- &cmd_buf[21], msg_num,
- HCLGE_DBG_DFX_BIOS_OFFSET,
- HCLGE_OPC_DFX_BIOS_COMMON_REG);
- } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ssu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_0_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_0);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_1_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_1);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_2) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_2_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_2);
- } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
- msg_num = sizeof(hclge_dbg_igu_egu_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
- &cmd_buf[17], msg_num,
- HCLGE_DBG_DFX_IGU_OFFSET,
- HCLGE_OPC_DFX_IGU_EGU_REG);
- } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rpu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_0_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_0);
-
- msg_num = sizeof(hclge_dbg_rpu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_1_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_1);
- } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
- msg_num = sizeof(hclge_dbg_ncsi_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
- &cmd_buf[14], msg_num,
- HCLGE_DBG_DFX_NCSI_OFFSET,
- HCLGE_OPC_DFX_NCSI_REG);
- } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rtc_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RTC_OFFSET,
- HCLGE_OPC_DFX_RTC_REG);
- } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ppp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_PPP_OFFSET,
- HCLGE_OPC_DFX_PPP_REG);
- } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rcb_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RCB_OFFSET,
- HCLGE_OPC_DFX_RCB_REG);
- } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_tqp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_TQP_OFFSET,
- HCLGE_OPC_DFX_TQP_REG);
- } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
- hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
- } else {
+ struct hclge_dbg_reg_type_info *reg_info;
+ bool has_dump = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (!strncmp(cmd_buf, reg_info->reg_type,
+ strlen(reg_info->reg_type))) {
+ hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
+ has_dump = true;
+ }
+ }
+
+ if (strncmp(cmd_buf, "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
+ has_dump = true;
+ }
+
+ if (!has_dump) {
dev_info(&hdev->pdev->dev, "unknown command\n");
return;
}
@@ -325,11 +321,17 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
struct hclge_desc desc;
int i, ret;
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
return;
}
@@ -409,6 +411,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -425,7 +433,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
return;
err_tm_pg_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -537,7 +545,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
return;
err_tm_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -556,7 +564,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
int pri_id, ret;
u32 i;
- ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+ ret = kstrtouint(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -590,6 +598,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
queue_id, qset_id, pri_id, tc_id);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
for (group_id = 0; group_id < 32; group_id++) {
@@ -620,7 +634,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
return;
err_tm_map_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -634,7 +648,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+ dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
ret);
return;
}
@@ -658,7 +672,7 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "dump qos pri map fail, status is %d.\n", ret);
+ "dump qos pri map fail, ret = %d\n", ret);
return;
}
@@ -715,6 +729,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf);
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ dev_info(&hdev->pdev->dev, "\n");
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
@@ -723,7 +765,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
if (ret)
goto err_qos_cmd_send;
- dev_info(&hdev->pdev->dev, "\n");
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
@@ -733,7 +774,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
@@ -755,37 +797,15 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
-
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
-
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
-
return;
err_qos_cmd_send:
dev_err(&hdev->pdev->dev,
- "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+ "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
@@ -825,9 +845,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_add[0], req0->mac_add[1],
- req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
- req0->mac_add[5]);
+ req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ req0->mac_addr[2], req0->mac_addr[3],
+ req0->mac_addr[4], req0->mac_addr[5]);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
@@ -883,14 +903,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
+ /* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
@@ -940,7 +963,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get firmware statistics bd number failed, ret=%d\n",
+ "get firmware statistics bd number failed, ret = %d\n",
ret);
return;
}
@@ -961,7 +984,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
if (ret) {
kfree(desc_src);
dev_err(&hdev->pdev->dev,
- "get firmware statistics failed, ret=%d\n", ret);
+ "get firmware statistics failed, ret = %d\n", ret);
return;
}
@@ -981,6 +1004,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
kfree(desc_src);
}
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+ struct hclge_desc *desc, int *offset,
+ int *length)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int i;
+ int j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ *offset,
+ le32_to_cpu(desc[i].data[j]));
+ *offset += sizeof(u32);
+ *length -= sizeof(u32);
+ if (*length <= 0)
+ return;
+ }
+ }
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
@@ -990,17 +1040,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
-#define HCLGE_CMD_DATA_NUM 6
- struct hclge_desc desc[5];
- u32 byte_offset;
- int bd_num = 5;
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int offset;
int length;
int data0;
int ret;
- int i;
- int j;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
@@ -1026,22 +1072,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
if (ret)
return;
- byte_offset = offset;
- for (i = 0; i < bd_num; i++) {
- for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
- if (i == 0 && j == 0)
- continue;
-
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- byte_offset,
- le32_to_cpu(desc[i].data[j]));
- byte_offset += sizeof(u32);
- length -= sizeof(u32);
- if (length <= 0)
- return;
- }
- }
- offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ hclge_ncl_config_data_print(hdev, desc, &offset, &length);
}
}
@@ -1067,6 +1098,9 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
+#define DUMP_REG "dump reg"
+#define DUMP_TM_MAP "dump tm map"
+
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1074,8 +1108,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_fd_tcam(hdev);
} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
hclge_dbg_dump_tc(hdev);
- } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
- hclge_dbg_dump_tm_map(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
+ hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
hclge_dbg_dump_tm(hdev);
} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
@@ -1086,8 +1120,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_qos_buf_cfg(hdev);
} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
hclge_dbg_dump_mng_table(hdev);
- } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
- hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index d055fda41775..38b79321c4c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -4,6 +4,9 @@
#ifndef __HCLGE_DEBUGFS_H
#define __HCLGE_DEBUGFS_H
+#include <linux/etherdevice.h>
+#include "hclge_cmd.h"
+
#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
@@ -63,9 +66,23 @@ struct hclge_dbg_bitmap_cmd {
};
};
+struct hclge_dbg_reg_common_msg {
+ int msg_num;
+ int offset;
+ enum hclge_opcode_type cmd;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
- char message[60];
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+ const char *reg_type;
+ struct hclge_dbg_dfx_message *dfx_msg;
+ struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
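
hclge_dbg_reg_info[] turns the ~90-line strncmp ladder into a table scan: each entry binds a command prefix to the metadata needed to dump it, and duplicated prefixes (three "ssu" entries, two "rpu") simply match in sequence, which is why the lookup loop keeps scanning rather than breaking on the first hit. A user-space sketch of the same table-driven dispatch (the offsets are illustrative, not the HCLGE_DBG_DFX_* values):

#include <stdio.h>
#include <string.h>

struct reg_entry {
    const char *prefix;                  /* e.g. "ssu", "igu egu" */
    int offset;                          /* illustrative metadata only */
};

static const struct reg_entry table[] = {
    { "bios common", 1 },
    { "ssu",         2 },
    { "igu egu",     4 },
};

static int dispatch(const char *cmd)
{
    int hit = 0;
    size_t i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        /* no break: duplicated prefixes must all get a chance to dump */
        if (!strncmp(cmd, table[i].prefix, strlen(table[i].prefix))) {
            printf("dump '%s' (offset %d)\n", table[i].prefix,
                   table[i].offset);
            hit = 1;
        }
    }
    return hit ? 0 : -1;                 /* -1: "unknown command" */
}

int main(void)
{
    return dispatch("ssu 1");
}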
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0a7243825e7b..58c6231aaa00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -637,8 +637,8 @@ static void hclge_log_error(struct device *dev, char *reg,
{
while (err->msg) {
if (err->int_msk & err_sts) {
- dev_warn(dev, "%s %s found [error status=0x%x]\n",
- reg, err->msg, err_sts);
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
set_bit(err->reset_level, reset_requests);
@@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg,
* @desc: descriptor for describing the command
* @cmd: command opcode
* @flag: flag for extended command structure
- * @w_num: offset for setting the read interrupt type.
- * @int_type: select which type of the interrupt for which the error
- * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
*
* This function query the error info from hw register/s using command
*/
static int hclge_cmd_query_error(struct hclge_dev *hdev,
- struct hclge_desc *desc, u32 cmd,
- u16 flag, u8 w_num,
- enum hclge_err_int_type int_type)
+ struct hclge_desc *desc, u32 cmd, u16 flag)
{
struct device *dev = &hdev->pdev->dev;
int desc_num = 1;
@@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
desc_num = 2;
}
- if (w_num)
- desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
@@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
- 0, 0, 0);
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
if (ret) {
dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
@@ -938,32 +930,44 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
- desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
- desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
- desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+ desc[1].data[3] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+ desc[1].data[4] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
}
- desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
- desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
- desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
- desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+ desc[1].data[3] |=
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
- desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
- desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
} else {
dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
return -EINVAL;
@@ -1171,8 +1175,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
- status);
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1208,8 +1212,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
- "rpu_rx_pkt_ecc_mbit_err");
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1321,10 +1325,12 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status)
+ if (status) {
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status,
&ae_dev->hw_err_reset_req);
+ hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR);
+ }
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1387,17 +1393,17 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
return ret;
}
- dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
- le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
- le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
- dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
- le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
- le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
@@ -1410,18 +1416,18 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
+ HCLGE_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
- dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
- le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
@@ -1434,7 +1440,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
/* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
- 0, 0, 0);
+ 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
return ret;
@@ -1450,9 +1456,9 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
le32_to_cpu(desc[0].data[0]);
while (err->msg) {
if (err->int_msk == err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg,
- le32_to_cpu(desc[0].data[0]));
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
break;
}
err++;
@@ -1460,13 +1466,13 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
}
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[1]));
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[2]));
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
return 0;
@@ -1483,8 +1489,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* read RAS error interrupt status */
ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
- 0, 0, 0);
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */
@@ -1495,10 +1500,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@ -1508,7 +1513,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
- dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
@@ -1566,8 +1571,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
@@ -1649,16 +1654,16 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev,
- "HNS Non-Fatal RAS error(status=0x%x) identified\n",
- status);
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
hclge_handle_all_ras_errors(hdev);
}
/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
@@ -1737,8 +1742,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
- vf_id, q_id);
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
@@ -1755,8 +1760,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+ hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
@@ -1802,8 +1807,8 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
- status);
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
@@ -1997,7 +2002,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 7ea8bb28a0cb..876fd81ad2f1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
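
Much of the hclge_err.c churn above is endianness hygiene: descriptor words are consumed by the firmware as little-endian, so every enable mask now passes through cpu_to_le32() instead of a raw store that would arrive byte-swapped on a big-endian host. A portable user-space equivalent serializes the bytes explicitly (put_le32() is a stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Portable cpu->LE32: serialize explicitly instead of storing raw. */
static void put_le32(uint8_t *dst, uint32_t v)
{
    dst[0] = v & 0xff;
    dst[1] = (v >> 8) & 0xff;
    dst[2] = (v >> 16) & 0xff;
    dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
    uint8_t desc_data[4];
    uint32_t int_en_mask = 0x0fffffffu;  /* example enable mask */

    put_le32(desc_data, int_en_mask);
    printf("%02x %02x %02x %02x\n",      /* ff ff ff 0f on any host */
           desc_data[0], desc_data[1], desc_data[2], desc_data[3]);
    return 0;
}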
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3fde5471e1c0..2b65f2799846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,25 @@
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -317,6 +336,80 @@ static const u8 hclge_hash_key[] = {
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -364,9 +457,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
u16 i, k, n;
int ret;
- desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
@@ -647,6 +744,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
count += 2;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ if (hdev->hw.mac.phydev) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
@@ -702,14 +805,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -1075,6 +1180,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1270,6 +1376,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
+ /* Set the init affinity based on pci func number */
+ i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+ i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+ cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+ &hdev->affinity_mask);
+
return ret;
}
@@ -2499,22 +2611,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->mbx_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->mbx_service_task);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->rst_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->rst_service_task);
}
-static void hclge_task_schedule(struct hclge_dev *hdev)
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- (void)schedule_work(&hdev->service_task);
+ !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
+ hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ system_wq, &hdev->service_task,
+ delay_time);
+ }
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2729,25 +2848,6 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(struct timer_list *t)
-{
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies + HZ);
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
- hclge_task_schedule(hdev);
-}
-
-static void hclge_service_complete(struct hclge_dev *hdev)
-{
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
-
- /* Flush memory before next watchdog */
- smp_mb__before_atomic();
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-}
-
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -2763,9 +2863,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
+ *
+ * check for vector0 reset event sources
*/
-
- /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -2882,10 +2982,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
break;
}
- /* clear the source of interrupt if it is not cause by reset */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable the interrupt if it is not caused by reset. When
+ * clearval equals 0, the interrupt status may have been
+ * cleared by hardware before the driver read the status register;
+ * in this case the vector0 interrupt should also be enabled.
+ */
if (!clearval ||
event_cause == HCLGE_VECTOR0_EVENT_MBX) {
- hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2918,6 +3023,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3105,6 +3240,71 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
+static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* for compatibility with old firmware, wait
+ * 100 ms for the VF to stop IO
+ */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return ret;
+ } else if (req->all_vf_ready) {
+ return 0;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
+ return -ETIME;
+}
+
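The sync helper above doubles as a capability probe: firmware that predates HCLGE_OPC_QUERY_VF_RST_RDY answers -EOPNOTSUPP, and the driver falls back to the old fixed wait. The retry-or-fall-back idiom in isolation, with a hypothetical query helper and illustrative timing constants:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_dev;

    /* hypothetical query: fills *ready; -EOPNOTSUPP on old firmware */
    int demo_query_peers_ready(struct demo_dev *dev, bool *ready);

    static int demo_wait_peers_ready(struct demo_dev *dev)
    {
            bool ready;
            int cnt = 0;
            int ret;

            do {
                    ret = demo_query_peers_ready(dev, &ready);
                    if (ret == -EOPNOTSUPP) {
                            msleep(100);    /* old firmware: fixed grace time */
                            return 0;
                    } else if (ret) {
                            return ret;     /* hard failure, give up */
                    } else if (ready) {
                            return 0;
                    }
                    msleep(20);             /* poll again shortly */
            } while (cnt++ < 15);

            return -ETIME;
    }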
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
+}
+
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -3229,7 +3429,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
if (!clearval)
return;
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ /* For revision 0x20, the reset interrupt source
+ * can only be cleared after the hardware reset is done
+ */
+ if (hdev->pdev->revision == 0x20)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -3250,19 +3456,33 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
return ret;
}
-static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
-#define HCLGE_RESET_SYNC_TIME 100
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+}
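hclge_reset_handshake() is a plain read-modify-write of one ready bit shared with the IMP firmware; the same shape works for any MMIO flag. A generic sketch (the offset and bit are placeholders, not the hns3 register layout):

    #include <linux/io.h>

    static void demo_set_mmio_flag(void __iomem *base, u32 offset,
                                   u32 flag, bool enable)
    {
            u32 val = readl(base + offset); /* read current register */

            if (enable)
                    val |= flag;            /* announce: driver is ready */
            else
                    val &= ~flag;           /* withdraw the announcement */

            writel(val, base + offset);
    }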
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
+ */
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3279,15 +3499,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
+ */
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
@@ -3298,14 +3522,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGE_RESET_SYNC_TIME);
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CMQ_ENABLE);
+ hclge_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
}
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3313,36 +3536,42 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
hdev->reset_pending);
return true;
- } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
- (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
- BIT(HCLGE_IMP_RESET_BIT))) {
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
dev_info(&hdev->pdev->dev,
- "reset failed because IMP Reset is pending\n");
+ "reset failed because new reset interrupt\n");
hclge_clear_reset_cause(hdev);
return false;
} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->reset_fail_cnt++;
- if (is_timeout) {
- set_bit(hdev->reset_type, &hdev->reset_pending);
- dev_info(&hdev->pdev->dev,
- "re-schedule to wait for hw reset done\n");
- return true;
- }
-
- dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
- hclge_clear_reset_cause(hdev);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
- mod_timer(&hdev->reset_timer,
- jiffies + HCLGE_RESET_INTERVAL);
-
- return false;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%d)\n",
+ hdev->reset_fail_cnt);
+ return true;
}
hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when reset fails */
+ hclge_reset_handshake(hdev, true);
+
dev_err(&hdev->pdev->dev, "Reset fail!\n");
return false;
}
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
@@ -3353,10 +3582,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
+ case HNAE3_GLOBAL_RESET:
+ /* fall through */
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
default:
break;
}
+ /* clear the handshake status after re-initialization is done */
+ hclge_reset_handshake(hdev, false);
+
return ret;
}
@@ -3382,7 +3619,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- bool is_timeout = false;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3410,10 +3646,8 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
- if (hclge_reset_wait(hdev)) {
- is_timeout = true;
+ if (hclge_reset_wait(hdev))
goto err_reset;
- }
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3458,14 +3692,22 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
- del_timer(&hdev->reset_timer);
+
+ /* if default_reset_request has a higher-level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to be fixed.
+ */
+ hdev->reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (hdev->reset_level != HNAE3_NONE_RESET)
+ set_bit(hdev->reset_level, &hdev->reset_request);
return;
err_reset_lock:
rtnl_unlock();
err_reset:
- if (hclge_reset_err_handle(hdev, is_timeout))
+ if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
}
@@ -3493,9 +3735,10 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
- HCLGE_RESET_INTERVAL)))
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request)
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -3525,6 +3768,12 @@ static void hclge_reset_timer(struct timer_list *t)
{
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ /* if default_reset_request has no value, it means that this reset
+ * request has already been handled, so just return here
+ */
+ if (!hdev->default_reset_request)
+ return;
+
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
@@ -3606,7 +3855,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task);
+ container_of(work, struct hclge_dev, service_task.work);
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
@@ -3621,7 +3872,8 @@ static void hclge_service_task(struct work_struct *work)
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
}
- hclge_service_complete(hdev);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4197,8 +4449,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- struct hclge_ctrl_vector_chain_cmd *req
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -5808,7 +6060,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOSPC;
}
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5959,6 +6211,89 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ false);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+ req->switch_param = switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret < 0)
+ return ret;
+ else if (ret == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ return -EBUSY;
+}
+
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret);
+}
+
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
@@ -6001,14 +6336,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 10
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-#define HCLGE_MAC_LINK_STATUS_DOWN 0
-#define HCLGE_MAC_LINK_STATUS_UP 1
-
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
- int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -6031,10 +6360,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6067,18 +6394,70 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- i = 0;
- do {
- /* serdes Internal loopback, independent of the network cable.*/
- msleep(HCLGE_MAC_LINK_STATUS_MS);
- ret = hclge_get_mac_link_status(hdev);
- if (ret == mac_link_ret)
- return 0;
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
- dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+ return ret;
+}
- return -EBUSY;
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev)
+ return -ENOTSUPP;
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
@@ -6110,6 +6489,20 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
int i, ret;
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+ * the same, the packets are looped back in the SSU. If SSU loopback
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
+ */
+ if (hdev->pdev->revision >= 0x21) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
switch (loop_mode) {
case HNAE3_LOOP_APP:
ret = hclge_set_app_loopback(hdev, en);
@@ -6118,6 +6511,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PARALLEL_SERDES:
ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -6160,10 +6556,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
+ /* Set the DOWN flag here to prevent the service task from
+ * being scheduled again
+ */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ cancel_delayed_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
}
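Note the ordering on the disable side: the DOWN bit is set before cancel_delayed_work_sync(), so a service run that is already executing cannot re-arm itself after the cancel returns, assuming the scheduling helper checks the DOWN bit before re-arming, as the guarded mod_delayed_work_on() call in hclge_task_schedule() suggests. The idiom, condensed into an illustrative fragment:

    /* stop side: forbid re-arming first, then wait for the task */
    set_bit(DEMO_STATE_DOWN, &dev->state);
    cancel_delayed_work_sync(&dev->service_task);

    /* schedule side: only re-arm while not DOWN */
    if (!test_bit(DEMO_STATE_DOWN, &dev->state))
            mod_delayed_work(system_wq, &dev->service_task, HZ);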
@@ -6202,12 +6601,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
+ hclge_config_mac_tnl_int(hdev, false);
+
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -6249,7 +6651,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
enum hclge_mac_vlan_tbl_opcode op)
{
struct hclge_dev *hdev = vport->back;
- int return_status = -EIO;
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
@@ -6260,52 +6661,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
- return_status = 0;
+ return 0;
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
+ return -ENOSPC;
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOSPC;
}
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_REMOVE) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"remove mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "remove mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_LKUP) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"lookup mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "lookup mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
- } else {
- return_status = -EINVAL;
+
dev_err(&hdev->pdev->dev,
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
- op);
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
}
- return return_status;
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
@@ -7019,7 +7421,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%p.\n",
+ "Change uc mac err! invalid mac:%pM.\n",
new_addr);
return -EINVAL;
}
@@ -7124,7 +7526,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan, u8 qos,
+ bool is_kill, u16 vlan,
__be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
@@ -7235,7 +7637,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
- u16 vport_id, u16 vlan_id, u8 qos,
+ u16 vport_id, u16 vlan_id,
bool is_kill)
{
u16 vport_idx, vport_num = 0;
@@ -7245,7 +7647,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return 0;
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- 0, proto);
+ proto);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %d vport vlan filter config fail, ret =%d.\n",
@@ -7532,7 +7934,7 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0, false);
+ vlan->vlan_id, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
@@ -7558,7 +7960,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan_id, 0,
+ vlan_id,
true);
list_del(&vlan->node);
@@ -7578,7 +7980,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
true);
vlan->hd_tbl_status = false;
@@ -7611,7 +8013,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- u16 vlan_proto, qos;
+ u16 vlan_proto;
u16 state, vlan_id;
int i;
@@ -7620,12 +8022,11 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id, qos,
+ vport->vport_id, vlan_id,
false);
continue;
}
@@ -7635,7 +8036,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
false);
}
}
@@ -7675,12 +8076,12 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
- new_info->qos, false);
+ false);
}
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
- old_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7707,7 +8108,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(vlan_info->vlan_proto),
vport->vport_id,
vlan_info->vlan_tag,
- vlan_info->qos, false);
+ false);
if (ret)
return ret;
@@ -7716,7 +8117,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(old_vlan_info->vlan_proto),
vport->vport_id,
old_vlan_info->vlan_tag,
- old_vlan_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7829,7 +8230,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
- /* When port base vlan enabled, we use port base vlan as the vlan
+ /* when port base vlan is enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
* when user add new vlan or remove exist vlan, just update the vport
* vlan list. The vlan id in vlan list will be written in vlan filter
@@ -7837,7 +8238,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
- vlan_id, 0, is_kill);
+ vlan_id, is_kill);
writen_to_tbl = true;
}
@@ -7848,7 +8249,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack
*/
@@ -7873,7 +8274,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
- 0, true);
+ true);
if (ret && ret != -EINVAL)
return;
@@ -8044,11 +8445,12 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8082,11 +8484,12 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8122,28 +8525,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
int ret;
- if (rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
- else if (rx_en && !tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
- else if (!rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
- else
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
- if (ret) {
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
- ret);
- return ret;
- }
-
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
- return 0;
+ return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8208,6 +8598,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
u32 rx_en, u32 tx_en)
{
@@ -8233,6 +8638,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
@@ -8481,7 +8888,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return ret;
+ return 0;
clear_nic:
hdev->nic_client = NULL;
@@ -8602,12 +9009,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
cancel_work_sync(&hdev->rst_service_task);
if (hdev->mbx_service_task.func)
@@ -8812,12 +9217,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
- INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ /* Set up affinity after the service task is initialized, because
+ * add_timer_on is called in the affinity notify path.
+ */
+ hclge_misc_affinity_setup(hdev);
+
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -8842,7 +9251,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
return 0;
err_mdiobus_unreg:
@@ -8979,6 +9390,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
if (mac->phydev)
@@ -9238,106 +9650,314 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+#define BD_LIST_MAX_NUM 30
-static int hclge_get_regs_len(struct hnae3_handle *handle)
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
+ /* prepare 4 commands to query DFX BD number */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, 4);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+#define HCLGE_DFX_REG_BD_NUM 4
+
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
int ret;
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return -EOPNOTSUPP;
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
}
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
- regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ return ret;
}
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, j, reg_um, separator_num;
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
+
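Each dump block is padded out to a whole 16-byte line with SEPARATOR_VALUE sentinels, so a parser can find block boundaries in the flat buffer. A worked example, assuming six data words per descriptor (the hclge_desc data array):

    /*
     * bd_num = 3  ->  reg_num = 6 * 3 = 18 words
     * separator_num = REG_NUM_PER_LINE - (18 & REG_NUM_REMAIN_MASK)
     *               = 4 - 2 = 2 sentinel words
     * total = 18 + 2 = 20 words = 5 full REG_LEN_PER_LINE lines
     */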
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, data_len, bd_num, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
int ret;
- *version = hdev->fw_version;
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
+ struct hclge_desc *desc_src;
+ u32 *reg = data;
+ int ret;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ return -ENOMEM;
}
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ kfree(desc_src);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
/* fetching per-PF registers values from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGE_RING_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
- 4 * j);
+ HCLGE_RING_INT_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
+
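The length helper and the dump code must agree, since ethtool allocates exactly get_regs_len() bytes. Checking one term by hand, assuming regs_num_32_bit = 10:

    /*
     * length side: 10 * sizeof(u32) / REG_LEN_PER_LINE + REG_SEPARATOR_LINE
     *            = 40 / 16 + 1 = 2 + 1 = 3 lines = 48 bytes
     * dump side:   10 values + (MAX_SEPARATE_NUM - (10 & REG_NUM_REMAIN_MASK))
     *            = 10 + 2 separators = 12 u32 = 48 bytes
     * the truncating division is compensated by the separator words.
     */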
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
- /* fetching PF common registers values from firmware */
ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
- reg += regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9452,7 +10072,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
- .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .get_mac_stats = hclge_get_mac_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6a12285f4c76..870550fa9ff1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -119,7 +119,7 @@
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
-#define HCLGE_TQP_RESET_TRY_TIMES 10
+#define HCLGE_TQP_RESET_TRY_TIMES 200
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0
@@ -148,6 +148,8 @@ enum HLCGE_PORT_TYPE {
NETWORK_PORT
};
+#define PF_VPORT_ID 0
+
#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
@@ -164,6 +166,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
+#define HCLGE_RESET_INT_M GENMASK(2, 0)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -178,6 +181,8 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
+#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -302,6 +307,13 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
@@ -532,50 +544,6 @@ struct key_info {
u8 key_length; /* use bit as unit */
};
-static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
-};
-
-static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
-};
-
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
@@ -806,9 +774,8 @@ struct hclge_dev {
u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
- struct timer_list service_timer;
struct timer_list reset_timer;
- struct work_struct service_task;
+ struct delayed_work service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -864,6 +831,10 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
+
+ /* affinity mask and notify for misc interrupt */
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* VPort level vlan tag configuration for TX direction */
@@ -990,7 +961,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
-int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
@@ -1018,4 +988,9 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 690b9990215c..f5da28a60d00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
- /* send response msg to VF after queue reset complete*/
+ /* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
@@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
HCLGE_RSS_MBX_RESP_LEN);
}
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+#define LINK_STATUS_OFFSET 1
+#define LINK_FAIL_CODE_OFFSET 2
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg[LINK_STATUS_OFFSET])
+ hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+}
+
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
return tail == hw->cmq.crq.next_to_use;
}
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF fail(%d) to media type for VF\n",
ret);
break;
+ case HCLGE_MBX_PUSH_LINK_STATUS:
+ hclge_handle_link_change_event(hdev, req);
+ break;
+ case HCLGE_MBX_NCSI_ERROR:
+ hclge_handle_ncsi_error(hdev);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index abb1b438564e..dc4dfd4602ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
+ phy_attached_info(phydev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 3f41fa2bc414..e829101d576c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
- u32 shapping_para = 0;
u8 ir_u, ir_b, ir_s;
+ u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
@@ -650,12 +650,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
-static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
- if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
- (hdev->tm_info.num_pg != 1))
- return -EINVAL;
-
hclge_tm_pg_info_init(hdev);
hclge_tm_tc_info_init(hdev);
@@ -663,8 +659,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
hclge_pfc_info_init(hdev);
-
- return 0;
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1428,15 +1422,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
- int ret;
-
/* fc_mode is HCLGE_FC_FULL on reset */
hdev->tm_info.fc_mode = HCLGE_FC_FULL;
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
- ret = hclge_tm_schd_info_init(hdev);
- if (ret)
- return ret;
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 652b796044e3..4c2c9458648f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean = 0;
+ int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
@@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+ reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
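The version word is now unpacked into four dotted byte fields via hnae3_get_field(). Assuming the BYTE3..BYTE0 masks select successive bytes from most to least significant, the print reduces to this sketch (demo_* name is hypothetical):

    #include <linux/kernel.h>

    /* e.g. version = 0x01020304 prints as "1.2.3.4"; mirrors what the
     * hnae3_get_field() calls above extract, under the assumed layout */
    static void demo_format_fw_version(u32 version, char *buf, size_t len)
    {
            snprintf(buf, len, "%u.%u.%u.%u",
                     (version >> 24) & 0xff, (version >> 16) & 0xff,
                     (version >> 8) & 0xff, version & 0xff);
    }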
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 127a434a56f3..f830eef02e5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGEVF_NIC_CMQ_EN_B 16
-#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a13a0e101c3b..594cae8c7410 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
*/
@@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- /* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
- dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
-
if (hdev->reset_type == HNAE3_FLR_RESET)
return hclgevf_flr_poll_timeout(hdev,
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_CNT);
-
- ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
- !(val & HCLGEVF_RST_ING_BITS),
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
if (ret) {
@@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
return 0;
}
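All three wait paths now funnel into readl_poll_timeout(), which re-reads an MMIO register until a condition holds or the timeout elapses, sleeping between reads. Its general shape, with placeholder offset, bit, and timing:

    #include <linux/iopoll.h>

    static int demo_wait_hw_reset_done(void __iomem *reg)
    {
            u32 val;

            /* poll every 20 us, up to 500 ms, until bit 0 clears;
             * returns 0 on success or -ETIMEDOUT on expiry */
            return readl_poll_timeout(reg, val, !(val & 0x1),
                                      20, 500 * USEC_PER_MSEC);
    }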
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
+
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
int ret;
@@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ return 0;
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
+ hclgevf_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
@@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
+ /* recover handshake status with IMP when reset fail */
+ hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
hdev->rst_stats.rst_fail_cnt);
@@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- } else {
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
}
}
@@ -1539,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
rtnl_lock();
- /* now, re-initialize the nic client and ae device*/
+ /* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
@@ -1762,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
- */
-
- /* if we are never geting into pending state it means either:
+ *
+ * if we are never getting into pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
@@ -1867,29 +1888,45 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
u32 *clearval)
{
- u32 cmdq_src_reg, rst_ing_reg;
+ u32 val, cmdq_stat_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
- cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_STAT_REG);
- if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
- *clearval = cmdq_src_reg;
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
+ /* set up VF hardware reset status; the PF will clear
+ * this status when it has finished initializing.
+ */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
return HCLGEVF_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
- if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
- *clearval = cmdq_src_reg;
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+ /* for revision 0x21, writing 0 to a bit of the clear
+ * register clears the corresponding interrupt, while
+ * writing 1 keeps the old value.
+ * for revision 0x20, the clear register is a read & write
+ * register, so we should write 0 only to the bit we are
+ * handling and keep the other bits as in cmdq_stat_reg.
+ */
+ if (hdev->pdev->revision >= 0x21)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
return HCLGEVF_VECTOR0_EVENT_MBX;
}
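The two clearing conventions described in the comment produce different clearval masks for the same event bit. The bit arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_CMDQ_INT_B 1  /* bit index from the hclgevf header */

    int main(void)
    {
            uint32_t cmdq_stat_reg = 0x0000000a;  /* example raw status */

            /* rev >= 0x21: write-0-to-clear, 1 keeps the old value,
             * so clear only our bit and leave every other bit set.
             */
            uint32_t clearval_v21 = ~(1U << RX_CMDQ_INT_B);

            /* rev 0x20: plain read/write register, so write back what
             * we read with only our bit forced to zero.
             */
            uint32_t clearval_v20 = cmdq_stat_reg & ~(1U << RX_CMDQ_INT_B);

            printf("v21: 0x%08x  v20: 0x%08x\n", clearval_v21, clearval_v20);
            return 0;
    }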
@@ -2265,7 +2302,7 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
- int ret = 0;
+ int ret;
hclgevf_get_misc_vector(hdev);
@@ -2695,7 +2732,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 5a9e30998a8f..bdde3afc286b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -87,6 +87,8 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -103,6 +105,9 @@
(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
@@ -120,7 +125,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 6a96987bd8f0..a108191c9e50 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -277,9 +277,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
switch (msg_q[0]) {
case HCLGE_MBX_LINK_STAT_CHANGE:
- link_status = le16_to_cpu(msg_q[1]);
+ link_status = msg_q[1];
memcpy(&speed, &msg_q[2], sizeof(speed));
- duplex = (u8)le16_to_cpu(msg_q[4]);
+ duplex = (u8)msg_q[4];
/* update upper layer with new link status */
hclgevf_update_link_status(hdev, link_status);
@@ -287,7 +287,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_MODE:
- idx = (u8)le16_to_cpu(msg_q[1]);
+ idx = (u8)msg_q[1];
if (idx)
memcpy(&hdev->hw.mac.supported, &msg_q[2],
sizeof(unsigned long));
@@ -301,14 +301,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- reset_type = le16_to_cpu(msg_q[1]);
+ reset_type = (enum hnae3_reset_type)msg_q[1];
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
break;
case HCLGE_MBX_PUSH_VLAN_INFO:
- state = le16_to_cpu(msg_q[1]);
+ state = msg_q[1];
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
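The hunk above drops le16_to_cpu() on the msg_q[] entries, presumably because the mailbox payload is already held as native-endian u16 values; applying the conversion a second time would byte-swap them on big-endian hosts. A small demonstration of why a spurious swap corrupts the value:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t bswap16(uint16_t v)
    {
            return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
            uint16_t link_status = 1;  /* already CPU-order, like msg_q[1] */

            /* On big-endian, le16_to_cpu() swaps bytes; doing that to a
             * value that is already native-endian destroys it:
             */
            printf("correct: %u  after spurious swap: %u\n",
                   link_status, bswap16(link_status));
            return 0;
    }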
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9c78251f9c39..0e13d1c7e474 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma_addr;
int i, j;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index cca71ba7a74a..13e30eba5349 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
ehea_destroy_eq(pr->eq);
for (i = 0; i < pr->rq1_skba.len; i++)
- if (pr->rq1_skba.arr[i])
- dev_kfree_skb(pr->rq1_skba.arr[i]);
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
for (i = 0; i < pr->rq2_skba.len; i++)
- if (pr->rq2_skba.arr[i])
- dev_kfree_skb(pr->rq2_skba.arr[i]);
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
for (i = 0; i < pr->rq3_skba.len; i++)
- if (pr->rq3_skba.arr[i])
- dev_kfree_skb(pr->rq3_skba.arr[i]);
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
for (i = 0; i < pr->sq_skba.len; i++)
- if (pr->sq_skba.arr[i])
- dev_kfree_skb(pr->sq_skba.arr[i]);
+ dev_kfree_skb(pr->sq_skba.arr[i]);
vfree(pr->rq1_skba.arr);
vfree(pr->rq2_skba.arr);
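This cleanup, like the matching e1000/e1000e/fm10k hunks later in the series, leans on dev_kfree_skb() being a no-op for a NULL pointer, which makes the removed if-checks redundant; it is the same contract as kfree(). The pattern in miniature, using free() as the user-space stand-in with the identical NULL guarantee:

    #include <stdlib.h>

    int main(void)
    {
            char *arr[4] = { malloc(8), NULL, malloc(8), NULL };

            /* No "if (arr[i])" needed: free(NULL), like
             * dev_kfree_skb(NULL), is defined to do nothing.
             */
            for (int i = 0; i < 4; i++)
                    free(arr[i]);
            return 0;
    }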
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 395dde444483..9e43c9ace9c2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl);
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index fa4bb940665c..4f83f97ffe8b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memcpy(dst + cur,
page_address(skb_frag_page(frag)) +
- frag->page_offset, skb_frag_size(frag));
+ skb_frag_off(frag), skb_frag_size(frag));
cur += skb_frag_size(frag);
}
} else {
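The hinic, emac, e1000, e1000e, and fm10k hunks in this series are all part of the same tree-wide conversion: struct skb_frag_struct becomes the skb_frag_t typedef, and direct frag->page_offset access is replaced by the skb_frag_off() accessor (sizes already go through skb_frag_size()). A sketch of the accessor pattern with a stand-in frag type, since the real one lives in linux/skbuff.h:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for skb_frag_t; the field names are illustrative. */
    typedef struct {
            uint32_t offset;
            uint32_t size;
    } frag_t;

    /* Accessor helpers mirroring skb_frag_off()/skb_frag_size(). */
    static uint32_t frag_off(const frag_t *f)  { return f->offset; }
    static uint32_t frag_size(const frag_t *f) { return f->size; }

    int main(void)
    {
            frag_t frags[2] = { { 0, 1024 }, { 1024, 512 } };
            size_t total = 0;

            for (int i = 0; i < 2; i++)
                    total += frag_size(&frags[i]);  /* not frags[i].size */
            printf("copy %zu bytes starting at offset %u\n",
                   total, frag_off(&frags[0]));
            return 0;
    }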
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..71d3d8854d8f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
DMA_TO_DEVICE);
- if (txdr->buffer_info[i].skb)
- dev_kfree_skb(txdr->buffer_info[i].skb);
+ dev_kfree_skb(txdr->buffer_info[i].skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f703fa58458e..86493fea56e4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too
*/
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
+ dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 08342698386d..de8c5818a305 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
buffer_info->dma,
buffer_info->length,
DMA_TO_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
dma_unmap_single(&pdev->dev,
buffer_info->dma,
2048, DMA_FROM_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 395b05701480..a1fab77b2096 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
else
phy_reg |= 0xFA;
e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
}
hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755fa17..1502895eb45d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e4baa13b3cda..8a3f035c3a5f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -6297,7 +6296,7 @@ fl_out:
static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
@@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int e1000e_pm_thaw(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000e_set_interrupt_capability(adapter);
@@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev)
static int e1000e_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
u16 eee_lp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 7d42582ed48d..b14441944b4b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
@@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
#define MIN_Q_VECTORS 1
enum fm10k_non_q_vectors {
FM10K_MBX_VECTOR,
-#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
- NON_Q_VECTORS_PF
+ NON_Q_VECTORS
};
-#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
- NON_Q_VECTORS_PF : \
- NON_Q_VECTORS_VF)
-#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
+#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS)
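With PF and VF now sharing a single non-queue vector (the mailbox), the per-MAC-type NON_Q_VECTORS(hw) macro collapses into a plain enum constant and the minimum MSI-X count becomes a compile-time value. The budget arithmetic that the later fm10k_main.c and fm10k_pci.c hunks rely on:

    #include <stdio.h>

    enum { MBX_VECTOR, NON_Q_VECTORS };  /* mirrors the new enum: 0, 1 */
    #define MIN_Q_VECTORS  1
    #define MIN_MSIX_COUNT (MIN_Q_VECTORS + NON_Q_VECTORS)

    int main(void)
    {
            int num_q_vectors = 8;  /* example queue budget */
            int v_budget = num_q_vectors + NON_Q_VECTORS;

            printf("min msix = %d, requested = %d, queues = %d\n",
                   MIN_MSIX_COUNT, v_budget, v_budget - NON_Q_VECTORS);
            return 0;
    }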
struct fm10k_q_vector {
struct fm10k_intfc *interface;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 20768ac7f17e..c45315472245 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
@@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
u8 num_tc = 0;
- int i, err;
+ int i;
/* verify type and determine num_tcs needed */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
/* update TC hardware mapping if necessary */
if (num_tc != netdev_get_num_tc(dev)) {
- err = fm10k_setup_tc(dev, num_tc);
+ int err = fm10k_setup_tc(dev, num_tc);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index dca104121c05..1d27b2fb23af 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
- if (!q_vector->dbg_q_vector)
- return;
/* Generate a file for each rx ring in the q_vector */
for (i = 0; i < q_vector->tx.count; i++) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4895dd83dd08..c681d2d28107 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/vmalloc.h>
@@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
const unsigned int size)
{
unsigned int i;
- char *p;
if (!pointer) {
/* memory is not zero allocated so we have to clear it */
@@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
}
for (i = 0; i < size; i++) {
- p = (char *)pointer + stats[i].stat_offset;
+ char *p = (char *)pointer + stats[i].stat_offset;
switch (stats[i].sizeof_stat) {
case sizeof(u64):
@@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_q_vector *qv;
u16 tx_itr, rx_itr;
int i;
@@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev,
/* update q_vectors */
for (i = 0; i < interface->num_q_vectors; i++) {
- qv = interface->q_vector[i];
+ struct fm10k_q_vector *qv = interface->q_vector[i];
+
qv->tx.itr = tx_itr;
qv->rx.itr = rx_itr;
}
@@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
/* report maximum channels */
ch->max_combined = fm10k_max_channels(dev);
/* report info for other vector */
- ch->max_other = NON_Q_VECTORS(hw);
+ ch->max_other = NON_Q_VECTORS;
ch->other_count = ch->max_other;
/* record RSS queues */
@@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev,
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int count = ch->combined_count;
- struct fm10k_hw *hw = &interface->hw;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != NON_Q_VECTORS(hw))
+ if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 8de77155f2e7..afe1fafd2447 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
@@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
size_t size;
- int i, err;
+ int i;
/* return error if iov_data is already populated */
if (iov_data)
@@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
/* loop through vf_info structures initializing each entry */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
+ int err;
/* Record VF VSI value */
vf_info->vsi = i + 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 90270b4a1682..e0a2be534b20 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright(c) 2013 - 2018 Intel Corporation.";
+ "Copyright(c) 2013 - 2019 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
/* prefetch first cache line of first page */
prefetch(page_addr);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif
/* allocate a skb to store the frags */
@@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct fm10k_tx_buffer *tx_buffer;
struct fm10k_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned char *data;
dma_addr_t dma;
unsigned int data_len, size;
@@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
@@ -1823,7 +1824,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
v_budget = min_t(u16, v_budget, num_online_cpus());
/* account for vectors not related to queues */
- v_budget += NON_Q_VECTORS(hw);
+ v_budget += NON_Q_VECTORS;
/* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
@@ -1855,7 +1856,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
}
/* record the number of queues available for q_vectors */
- interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
+ interface->num_q_vectors = v_budget - NON_Q_VECTORS;
return 0;
}
@@ -1869,7 +1870,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
struct net_device *dev = interface->netdev;
- int pc, offset, rss_i, i, q_idx;
+ int pc, offset, rss_i, i;
u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
u8 num_pcs = netdev_get_num_tc(dev);
@@ -1879,7 +1880,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
rss_i = interface->ring_feature[RING_F_RSS].indices;
for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
- q_idx = pc;
+ int q_idx = pc;
+
for (i = 0; i < rss_i; i++) {
interface->tx_ring[offset + i]->reg_idx = q_idx;
interface->tx_ring[offset + i]->qos_pc = pc;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 21021fe4f1c3..75e51f91036c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_common.h"
@@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
{
struct fm10k_mbx_fifo *fifo = &mbx->rx;
u16 total_len = 0, msg_len;
- u32 *msg;
/* length should include previous amounts pushed */
len += mbx->pushed;
/* offset in message is based off of current message size */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
msg_len = FM10K_TLV_DWORD_LEN(*msg);
total_len += msg_len;
@@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* reduce length by 1 to convert to a mask */
u16 mbmem_len = mbx->mbmem_len - 1;
u16 tail_len, len = 0;
- u32 *msg;
/* push head behind tail */
if (mbx->tail < head)
@@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* determine msg aligned offset for end of buffer */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
tail_len = len;
len += FM10K_TLV_DWORD_LEN(*msg);
@@ -2132,7 +2134,8 @@ fifo_err:
* DWORDs, not bytes. Any invalid values will cause the mailbox to return
* error.
**/
-s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw,
+ struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *msg_data)
{
mbx->mbx_reg = FM10K_GMBX;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 538a8467f434..09f7a246e134 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
@@ -54,7 +54,7 @@ err:
**/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_tx_queues; i++) {
err = fm10k_setup_tx_resources(interface->tx_ring[i]);
@@ -121,7 +121,7 @@ err:
**/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_rx_queues; i++) {
err = fm10k_setup_rx_resources(interface->rx_ring[i]);
@@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
**/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
- struct fm10k_tx_buffer *tx_buffer;
unsigned long size;
u16 i;
@@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_buffer[i];
+ struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];
+
fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
}
@@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
if (!rx_ring->rx_buffer)
return;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
- struct fm10k_ring *ring;
int i;
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = interface->rx_ring[i];
+ struct fm10k_ring *ring = interface->rx_ring[i];
+
rcu_assign_pointer(ring->l2_accel, l2_accel);
}
@@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_l2_accel *old_l2_accel = NULL;
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
- int size = 0, i;
+ int size, i;
u16 vid, glort;
/* The hardware supported by fm10k only filters on the destination MAC
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e49fb51d3613..bb236fa44048 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
struct net_device *netdev = interface->netdev;
u32 __iomem *hw_addr;
u32 value;
- int err;
/* do nothing if netdev is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
@@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
if (~value) {
+ int err;
+
/* Make sure the reset was initiated because we detached,
* otherwise we might race with a different reset flow.
*/
@@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
*/
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
- int i;
-
/* If we're down or resetting, just bail */
if (test_bit(__FM10K_DOWN, interface->state) ||
test_bit(__FM10K_RESETTING, interface->state))
@@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
interface->next_tx_hang_check = jiffies + (2 * HZ);
if (netif_carrier_ok(interface->netdev)) {
+ int i;
+
/* Force detection of hung controller */
for (i = 0; i < interface->num_tx_queues; i++)
set_check_for_tx_hang(interface->tx_ring[i]);
@@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ txint = ring->q_vector->v_idx + NON_Q_VECTORS;
txint |= FM10K_INT_MAP_TIMER0;
}
@@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
rxint |= FM10K_INT_MAP_TIMER1;
}
@@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
int vector = interface->num_q_vectors;
- struct fm10k_hw *hw = &interface->hw;
struct msix_entry *entry;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
+ entry = &interface->msix_entries[NON_Q_VECTORS + vector];
while (vector) {
struct fm10k_q_vector *q_vector;
@@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
unsigned int ri = 0, ti = 0;
int vector, err;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
+ entry = &interface->msix_entries[NON_Q_VECTORS];
for (vector = 0; vector < interface->num_q_vectors; vector++) {
struct fm10k_q_vector *q_vector = interface->q_vector[vector];
@@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* Restart the MAC/VLAN request queue in-case of outstanding events */
fm10k_macvlan_schedule(interface);
- return err;
+ return 0;
}
/**
@@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
**/
static int __maybe_unused fm10k_resume(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
@@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
**/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index cb4d02629b86..be07bfdb0bb4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
-s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
@@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u32 *result;
s32 err = 0;
u32 msg[2];
u8 mode = 0;
@@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
return FM10K_ERR_PARAM;
if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
- result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+ u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
/* XCAST mode update requested */
err = fm10k_tlv_attr_get_u8(result, &mode);
@@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
/* read remaining fields */
fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
fault->address <<= 32;
- fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
/* clear valid bit to allow for next error */
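The one-character fix above ('=' becomes '|=') repairs 64-bit fault-address assembly: the high 32 bits are read and shifted up, and the low 32 bits must be OR-ed in rather than overwriting the whole value. The arithmetic in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t hi = 0x00000012, lo = 0x34567890;  /* example reads */
            uint64_t addr;

            addr = hi;    /* FAULT_ADDR_HI */
            addr <<= 32;  /* make room for the low half */
            addr |= lo;   /* fixed: OR in FAULT_ADDR_LO (was '=') */

            printf("fault address = 0x%016llx\n",
                   (unsigned long long)addr);  /* 0x0000001234567890 */
            return 0;
    }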
@@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
* switch API.
**/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, mask;
u32 dglort_map;
@@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
* This handler configures the default VLAN for the PF
**/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, pvid;
u32 pvid_update;
@@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
* messages that the PF has sent.
**/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_swapi_error err_msg;
s32 err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 2a7a40bf2b1c..21eff0895a7a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_tlv.h"
@@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 i, attr_id, offset = 0;
- s32 err = 0;
+ s32 err;
u16 len;
/* verify pointers are not NULL */
@@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* a minimum it just indicates that the message requested was
* unimplemented.
**/
-s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
+ u32 __always_unused **results,
+ struct fm10k_mbx_info __always_unused *mbx)
{
return FM10K_NOT_IMPLEMENTED;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 9fb9fca375e3..15ac1c7885bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index a8519c1f0406..dc8ccd378ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_vf.h"
@@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
* This function should determine the MAC address for the VF
**/
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u8 perm_addr[ETH_ALEN];
u16 vid;
@@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
* This function is used to add or remove unicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
- const u8 *mac, u16 vid, bool add, u8 flags)
+static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ const u8 *mac, u16 vid, bool add,
+ u8 __always_unused flags)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
@@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
* This function is used to add or remove multicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
const u8 *mac, u16 vid, bool add)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
@@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
* are ready to bring up the interface.
**/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
@@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
* enabled we can add filters, if it is disabled all filters for this
* logical port are flushed.
**/
-static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
- u16 count, bool enable)
+static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ u16 __always_unused count, bool enable)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[2];
@@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
* so that it can enable either multicast, multicast promiscuous, or
* promiscuous mode of operation.
**/
-static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort, u8 mode)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[3];
@@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
* that information to then populate a DGLORTMAP/DEC entry and the queues
* to which it has been assigned.
**/
-static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw,
struct fm10k_dglort_cfg *dglort)
{
/* verify the dglort pointer */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 84bd06901014..3e535d3263b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1021,6 +1021,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 814acbe79ffd..72c04881d290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8))
+ hw->aq.api_min_ver >= 8)) {
hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
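The adminq hunk above enables both the persistent-LLDP and drop-mode flags once the firmware reports API 1.8 or newer. The major/minor comparison is the usual lexicographic version check:

    #include <stdbool.h>
    #include <stdio.h>

    /* True when (maj, min) >= (req_maj, req_min), the same shape as
     * the adminq check above.
     */
    static bool api_at_least(int maj, int min, int req_maj, int req_min)
    {
            return maj > req_maj || (maj == req_maj && min >= req_min);
    }

    int main(void)
    {
            printf("%d\n", api_at_least(1, 8, 1, 8));  /* 1: enabled */
            printf("%d\n", api_at_least(1, 7, 1, 8));  /* 0 */
            printf("%d\n", api_at_least(2, 0, 1, 8));  /* 1 */
            return 0;
    }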
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 6536023fa074..21cccec328e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0008
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
+#define I40E_FW_API_VERSION_MINOR_X722 0x0009
+#define I40E_FW_API_VERSION_MINOR_X710 0x0009
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -2051,20 +2051,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 906cf68d3453..46e649c09f72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -13,7 +13,7 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
@@ -1577,19 +1577,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
- if (status)
- break;
-
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
status = I40E_ERR_UNKNOWN_PHY;
break;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ case I40E_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
}
- } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
- (total_delay < max_delay));
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
if (status)
return status;
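The rework above replaces the if/else chain with a switch and tightens the loop condition so that only I40E_AQ_RC_EAGAIN retries (after a 1-2 ms sleep), while EIO and success both exit immediately. The retry skeleton, with a pretend firmware that is busy twice before succeeding:

    #include <stdio.h>

    enum aq_rc { RC_OK, RC_EIO, RC_EAGAIN };

    static enum aq_rc send_command(void)
    {
            static int calls;
            return ++calls < 3 ? RC_EAGAIN : RC_OK;
    }

    int main(void)
    {
            const unsigned int max_delay = 10;
            unsigned int total_delay = 0;
            enum aq_rc rc;
            int status = 0;

            do {
                    rc = send_command();
                    switch (rc) {
                    case RC_EIO:
                            status = -1;   /* unknown PHY, stop */
                            break;
                    case RC_EAGAIN:
                            total_delay++; /* kernel sleeps 1-2 ms here */
                            status = -2;   /* provisional timeout */
                            break;
                    default:               /* also covers RC_OK */
                            status = 0;
                            break;
                    }
            } while (rc == RC_EAGAIN && total_delay < max_delay);

            printf("status=%d after %u retries\n", status, total_delay);
            return 0;
    }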
@@ -1643,25 +1646,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-/**
- * i40e_set_fc
- * @hw: pointer to the hw struct
- * @aq_failures: buffer to return AdminQ failure information
- * @atomic_restart: whether to enable atomic link restart
- *
- * Set the requested flow control mode using set_phy_config.
- **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+static noinline_for_stack enum i40e_status_code
+i40e_set_fc_status(struct i40e_hw *hw,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ bool atomic_restart)
{
- enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
- struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
- enum i40e_status_code status;
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
u8 pause_mask = 0x0;
- *aq_failures = 0x0;
-
switch (fc_mode) {
case I40E_FC_FULL:
pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
@@ -1677,6 +1670,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
break;
}
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities == abilities->abilities)
+ return 0;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ return i40e_aq_set_phy_config(hw, &config, NULL);
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ *aq_failures = 0x0;
+
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
@@ -1685,31 +1720,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
- /* clear the old pause settings */
- config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
- ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
- /* set the new abilities */
- config.abilities |= pause_mask;
- /* If the abilities have changed, then set the new config */
- if (config.abilities != abilities.abilities) {
- /* Auto restart link so settings take effect */
- if (atomic_restart)
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- /* Copy over all the old settings */
- config.phy_type = abilities.phy_type;
- config.phy_type_ext = abilities.phy_type_ext;
- config.link_speed = abilities.link_speed;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_PHY_FEC_CONFIG_MASK;
- status = i40e_aq_set_phy_config(hw, &config, NULL);
+ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- if (status)
- *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- }
/* Update the link info */
status = i40e_update_link_info(hw);
if (status) {
@@ -2537,7 +2551,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
i40e_status status = 0;
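Both annotations in this file target stack usage rather than behavior: splitting the flow-control body into i40e_set_fc_status() and marking it and i40e_update_link_info() noinline_for_stack keeps the large phy-abilities/phy-config locals confined to their own frames instead of being inlined into one oversized caller frame. In the kernel, noinline_for_stack expands to the compiler's noinline attribute, illustrated here in user space:

    #include <stdio.h>
    #include <string.h>

    /* Large locals stay in this frame; without noinline the compiler
     * could inline the call and grow the caller's frame instead.
     */
    __attribute__((noinline))
    static int configure(int mode)
    {
            char big_scratch[512];  /* stand-in for the AQ structs */

            memset(big_scratch, mode, sizeof(big_scratch));
            return big_scratch[0];
    }

    int main(void)
    {
            printf("%d\n", configure(3));
            return 0;
    }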
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 292eeb3def10..200a1cb3b536 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset, 1,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
if (ret)
return I40E_ERR_NOT_READY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index ddb48ae7cce4..2a80c5daa376 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -30,6 +30,8 @@
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 55d20acfcf70..41232898d8ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1732,29 +1732,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile;
const char *name = pci_name(pf->pdev);
- const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (!pf->i40e_dbg_pf)
- return;
-
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_command_fops);
- if (!pfile)
- goto create_failed;
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_netdev_ops_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
- return;
-
-create_failed:
- dev_info(dev, "debugfs dir/file for %s failed\n", name);
- debugfs_remove_recursive(pf->i40e_dbg_pf);
+ debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 527eb52c5401..41e1240acaea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
}
/**
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: mask request FEC info
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ struct ethtool_link_ksettings *ks)
+{
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+ if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
+}
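The helper turns the requested-FEC bitmask into advertised link modes: RS wins over BaseR, and when neither is requested FEC_NONE is advertised, plus both coded modes if auto-FEC is set. The decision logic condensed, with stand-in flag values since the real I40E_AQ_SET_FEC_* bits are defined outside this hunk:

    #include <stdio.h>

    #define FEC_REQ_RS 0x1  /* stand-ins for the I40E_AQ_SET_FEC_* bits */
    #define FEC_REQ_KR 0x2
    #define FEC_AUTO   0x4

    static void advertise(unsigned int req)
    {
            if (req & FEC_REQ_RS) {
                    printf("advertise: RS\n");
            } else if (req & FEC_REQ_KR) {
                    printf("advertise: BaseR\n");
            } else {
                    printf("advertise: None\n");
                    if (req & FEC_AUTO)
                            printf("advertise: RS, BaseR (auto)\n");
            }
    }

    int main(void)
    {
            advertise(FEC_REQ_RS | FEC_AUTO);  /* RS takes precedence */
            advertise(0);                      /* nothing requested */
            return 0;
    }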
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ks: ethtool ksettings to fill in
@@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ struct i40e_veb *veb = NULL;
unsigned int i;
bool veb_stats;
u64 *p = data;
@@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
goto check_data_pointer;
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->lan_veb < I40E_MAX_VEB) &&
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ if (veb_stats) {
+ veb = pf->veb[pf->lan_veb];
+ i40e_update_veb_stats(veb);
+ }
+
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
* intelligently
@@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+check_data_pointer:
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
@@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Module is not SFF-8472 compliant */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
+ /* Module is SFF-8472 compliant but doesn't implement
+ * Digital Diagnostic Monitoring (DDM).
+ */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9ebbe3da61bb..fdf43d87e983 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -534,6 +534,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
sizeof(pf->veb[i]->stats));
memset(&pf->veb[i]->stats_offsets, 0,
sizeof(pf->veb[i]->stats_offsets));
+ memset(&pf->veb[i]->tc_stats, 0,
+ sizeof(pf->veb[i]->tc_stats));
+ memset(&pf->veb[i]->tc_stats_offsets, 0,
+ sizeof(pf->veb[i]->tc_stats_offsets));
pf->veb[i]->stat_offsets_loaded = false;
}
}
@@ -677,7 +681,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
* i40e_update_veb_stats - Update Switch component statistics
* @veb: the VEB being updated
**/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
struct i40e_hw *hw = &pf->hw;
@@ -2530,6 +2534,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+ vsi->netdev->name,
+ cur_multipromisc ? "entering" : "leaving");
}
}
@@ -3360,7 +3368,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
- if (!i40e_enabled_xdp_vsi(vsi))
+ if (err || !i40e_enabled_xdp_vsi(vsi))
return err;
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
@@ -6412,50 +6420,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
- * i40e_update_dcb_config
- * @hw: pointer to the HW struct
- * @enable_mib_change: enable MIB change event
- *
- * Update DCB configuration from the firmware
- **/
-static enum i40e_status_code
-i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
-{
- struct i40e_lldp_variables lldp_cfg;
- i40e_status ret;
-
- if (!hw->func_caps.dcb)
- return I40E_NOT_SUPPORTED;
-
- /* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return I40E_ERR_NOT_READY;
-
- /* Get DCBX status */
- ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
- if (ret)
- return ret;
-
- /* Check the DCBX Status */
- if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
- hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
- /* Get current DCBX configuration */
- ret = i40e_get_dcb_config(hw);
- if (ret)
- return ret;
- } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- return I40E_ERR_NOT_READY;
- }
-
- /* Configure the LLDP MIB change event */
- if (enable_mib_change)
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
-
- return ret;
-}
-
-/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6477,7 +6441,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
}
- err = i40e_update_dcb_config(hw, true);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -8486,6 +8450,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -14569,9 +14538,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
-
- if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
+ bool is_recovery_mode = false;
+
+ if (pf->hw.mac.type == I40E_MAC_XL710)
+ is_recovery_mode =
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
+ if (pf->hw.mac.type == I40E_MAC_X722)
+ is_recovery_mode =
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
+ if (is_recovery_mode) {
dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
@@ -14585,6 +14565,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
}
/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is useful when a NIC is about to enter recovery mode.
+ * When a NIC's internal data structures become corrupted, its firmware
+ * eventually enters recovery mode. Right after a POR it takes about
+ * 7 minutes for the firmware to get there; until then the NIC is in an
+ * intermediate state. The only way for the driver to detect that
+ * intermediate state is to issue a series of PF resets and check the
+ * return value: a successful PF reset may mean the firmware is already
+ * in recovery mode, so the caller must still check for recovery mode
+ * when this function returns success. There is a small chance the
+ * firmware hangs in the intermediate state forever. Since waiting the
+ * full 7 minutes is impractical, this function retries for about
+ * 10 seconds and then gives up by returning an error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+{
+ const unsigned short MAX_CNT = 1000;
+ const unsigned short MSECS = 10;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ int cnt;
+
+ for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ if (!ret)
+ break;
+ msleep(MSECS);
+ }
+
+ if (cnt == MAX_CNT) {
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+ return ret;
+ }
+
+ pf->pfr_count++;
+ return ret;
+}
+
+/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
* @hw: ptr to the hardware info
@@ -14812,14 +14837,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- if (!i40e_check_recovery_mode(pf)) {
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
- }
- pf->pfr_count++;
+
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_pf_loop_reset(pf);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
}
+
+ i40e_check_recovery_mode(pf);
+
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -15605,8 +15638,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
**/
static int __maybe_unused i40e_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
@@ -15656,8 +15688,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
**/
static int __maybe_unused i40e_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
/* If we're not suspended, then there is nothing to do */
@@ -15674,7 +15705,7 @@ static int __maybe_unused i40e_resume(struct device *dev)
*/
err = i40e_restore_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
err);
}
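
The i40e_pf_loop_reset() added earlier in this file bounds its retries at MAX_CNT * MSECS = 1000 * 10 ms, roughly 10 seconds. A standalone sketch of that retry-with-timeout shape; the callback and error values here are illustrative, not driver API:

#include <stdio.h>
#include <unistd.h>

/* Poll an operation that may transiently fail while firmware settles,
 * and give up after 1000 * 10 ms (~10 s) rather than the ~7 minutes
 * the hardware may need in the worst case.
 */
static int retry_reset(int (*try_reset)(void))
{
	const int max_cnt = 1000;
	const int interval_ms = 10;
	int ret = -1;

	for (int cnt = 0; cnt < max_cnt; cnt++) {
		ret = try_reset();
		if (ret == 0)
			return 0; /* reset accepted */
		usleep(interval_ms * 1000);
	}
	return ret; /* still failing after ~10 s: report last error */
}

static int fail_then_succeed(void)
{
	static int calls;
	return ++calls < 5 ? -16 /* -EBUSY */ : 0;
}

int main(void)
{
	printf("retry_reset -> %d\n", retry_reset(fail_then_succeed));
	return 0;
}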
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index c508b75c3c09..e4d8d20baf3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_module_data - Reads NVM module data into a specified memory location
+ * @hw: pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @offset: offset in words from module start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr)
+{
+ i40e_status status;
+ u16 ptr_value = 0;
+ u32 flat_offset;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL)
+ return I40E_ERR_BAD_PTR;
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ ptr_value &= ~I40E_PTR_TYPE;
+
+ /* PtrValue in 4kB units, need to convert to words */
+ ptr_value /= 2;
+ flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!status) {
+ status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
+ 2 * words_data_size,
+ data_ptr, true, NULL);
+ i40e_release_nvm(hw);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm aq failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ } else {
+ return I40E_ERR_NVM;
+ }
+ } else {
+ /* Read from the Shadow RAM */
+ status = i40e_read_nvm_buffer(hw, ptr_value + offset,
+ &words_data_size, data_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
+
+/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer, acquiring the lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
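
i40e_read_nvm_module_data() above decodes a module pointer two ways: with the top bit (I40E_PTR_TYPE) set, the value counts 4 KB units outside the Shadow RAM; otherwise it is already a word offset within it. A hedged sketch of just that decoding arithmetic, assuming I40E_PTR_TYPE is the 0x8000 top bit as the code implies:

#include <stdio.h>
#include <stdint.h>

#define PTR_TYPE        0x8000 /* top bit: module lives outside Shadow RAM */
#define INVALID_PTR_VAL 0x7FFF
#define INVALID_VAL     0xFFFF

/* Shadow-RAM pointers are word offsets; flat pointers carry the top
 * bit and count 4 KB units, converted to words (ptr / 2, then * 0x1000,
 * i.e. ptr * 2048 words per 4 KB) before adding the caller's offset.
 */
static long module_word_offset(uint16_t ptr, uint16_t offset)
{
	if (ptr == INVALID_PTR_VAL || ptr == INVALID_VAL)
		return -1; /* pointer never initialized */

	if (ptr & PTR_TYPE) {
		ptr &= ~PTR_TYPE;
		ptr /= 2; /* 4 KB units -> words */
		return (long)ptr * 0x1000 + offset;
	}
	return (long)ptr + offset; /* already a Shadow-RAM word offset */
}

int main(void)
{
	printf("flat:   0x%lx\n", module_word_offset(0x8004, 0x10));
	printf("shadow: 0x%lx\n", module_word_offset(0x0048, 0x10));
	return 0;
}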
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index eac88bcc6c06..5250441bf75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
@@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 52e3680c57f8..d35d690ca10f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -58,7 +58,7 @@
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -81,7 +81,7 @@
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -108,7 +108,7 @@
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -136,7 +136,7 @@
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -259,7 +259,7 @@
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -363,6 +363,12 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -503,7 +509,7 @@
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1242,14 +1248,14 @@
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1658,7 +1664,7 @@
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3025,7 +3031,7 @@
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3161,7 +3167,7 @@
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3184,7 +3190,7 @@
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
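
All the 0x1 -> 0x1u changes in this header share one motivation: assuming I40E_MASK expands to a plain left shift of its first argument, shifting the signed literal 0x1 by 31 moves a one into the sign bit of a 32-bit int, which is undefined behavior in C. The unsigned literal keeps the expression in unsigned arithmetic so bit-31 masks compare cleanly against u32 register reads. A minimal illustration:

#include <stdio.h>

#define MASK_SIGNED(shift)   (0x1 << (shift))  /* UB at shift == 31 */
#define MASK_UNSIGNED(shift) (0x1u << (shift)) /* well-defined */

int main(void)
{
	unsigned int reg = 0x80000000u; /* e.g. an ENABLE bit in bit 31 */

	printf("unsigned mask matches: %d\n",
	       (reg & MASK_UNSIGNED(31)) != 0);
	/* (reg & MASK_SIGNED(31)) would rely on undefined behavior */
	return 0;
}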
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2a2fe3ec7926..e3f29dc8b290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
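
These hunks swap direct frag->page_offset reads for the skb_frag_off() accessor; the arithmetic around it is the usual power-of-two alignment idiom, where -(off) & (A - 1) yields the bytes remaining before the next multiple of A. A standalone sketch; the constant mirrors I40E_MAX_READ_REQ_SIZE only illustratively:

#include <stdio.h>

#define MAX_READ_REQ_SIZE 4096 /* must be a power of two */

/* Distance from off to the next MAX_READ_REQ_SIZE boundary. Unary
 * minus on an unsigned value is defined modular arithmetic, so the
 * mask extracts exactly the pad the driver subtracts from its sum.
 */
static unsigned int pad_to_boundary(unsigned int off)
{
	return -off & (MAX_READ_REQ_SIZE - 1);
}

int main(void)
{
	printf("%u\n", pad_to_boundary(0));    /* 0    */
	printf("%u\n", pad_to_boundary(1));    /* 4095 */
	printf("%u\n", pad_to_boundary(4000)); /* 96   */
	return 0;
}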
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 100e92d2982f..36d37f31a287 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8f43aa47c263..b43ec94a0f29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -443,6 +443,7 @@ struct i40e_nvm_access {
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40
#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
#define I40E_MODULE_TYPE_QSFP28 0x11
#define I40E_MODULE_QSFP_MAX_LEN 640
@@ -623,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
/* Used in set switch config AQ command */
@@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02b09a8ad54c..f8aa4deceb5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- if (vf->link_forced) {
+
+ /* Always report link is down if the VF queues aren't enabled */
+ if (!vf->queues_enabled) {
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ } else if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
+
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
alluni = true;
aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
alluni);
- if (!aq_ret) {
- if (allmulti) {
+ if (aq_ret)
+ goto err_out;
+
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
- if (alluni) {
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- }
- }
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
@@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* VF does not know about these additional VSIs and all
* it cares is about its own queues. PF configures these queues
* to its appropriate VSIs based on TC mapping
- **/
+ */
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
@@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
+ vf->queues_enabled = true;
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
+ /* Immediately mark queues as disabled */
+ vf->queues_enabled = false;
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
+ * If the VF is indeed in reset, the vsi pointer must be
+ * refreshed to the newly loaded VSI under pf->vsi[id].
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (i > 0)
+ vsi = pf->vsi[vf->lan_vsi_idx];
break;
+ }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
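
The promiscuous-mode rework above folds set_bit/clear_bit plus unconditional logging into test_and_set_bit()/test_and_clear_bit(), which return the previous bit value, so a message is printed only when the state actually changes. A non-atomic userspace sketch of that transition-only-logging shape; the helper is illustrative, not a kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Set or clear a bit and report whether the value actually changed,
 * mirroring how test_and_set_bit()/test_and_clear_bit() let the
 * caller log transitions instead of every repeated request.
 */
static bool test_and_assign(unsigned long *state, unsigned long bit,
			    bool want)
{
	bool was = *state & (1UL << bit);

	if (want)
		*state |= 1UL << bit;
	else
		*state &= ~(1UL << bit);
	return was != want; /* true only on a real transition */
}

int main(void)
{
	unsigned long vf_state = 0;

	for (int i = 0; i < 3; i++)
		if (test_and_assign(&vf_state, 0, true))
			printf("VF entered multicast promiscuous mode\n");
	/* prints once, not three times */
	return 0;
}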
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f65cc0c16550..7164b9bb294f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,6 +99,7 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
+ bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_mac;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 0cca1b589b56..7a30d5d5ef53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
**/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > IAVF_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct iavf_tx_buffer *tx_bi;
struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 71e7d090f8db..dd3348f9da9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
**/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9ee6b55553c0..fb2bc836b20a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -69,11 +69,10 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
-#define ICE_MBXQ_LEN 64
+#define ICE_MBXSQ_LEN 64
+#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_TXQS 2048
-#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
@@ -86,16 +85,6 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
@@ -220,6 +209,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_STATE_NBITS /* must be last */
};
@@ -292,8 +282,8 @@ struct ice_vsi {
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */
- u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */
+ u16 *txq_map; /* index in pf->avail_txqs */
+ u16 *rxq_map; /* index in pf->avail_rxqs */
u16 alloc_txq; /* Allocated Tx queues */
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
@@ -329,7 +319,6 @@ struct ice_q_vector {
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
- ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
@@ -337,7 +326,8 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_ENABLE_FW_LLDP,
+ ICE_FLAG_NO_MEDIA,
+ ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -363,9 +353,9 @@ struct ice_pf {
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
- DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
- DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
+ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -376,6 +366,8 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 max_pf_txqs; /* Total Tx queues PF wide */
+ u16 max_pf_rxqs; /* Total Rx queues PF wide */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
@@ -455,6 +447,8 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_update_vsi_stats(struct ice_vsi *vsi);
+void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
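
The ice changes above replace the fixed ICE_MAX_TXQS/ICE_MAX_RXQS arrays and bitmaps with pointers sized at runtime from the device-reported max_pf_txqs/max_pf_rxqs (in the kernel this would be done with bitmap_zalloc()). A userspace analogue of sizing such a bitmap from a runtime value:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate a zeroed bitmap of nbits bits, rounded up to whole longs,
 * the same layout the kernel's unsigned long * bitmaps use.
 */
static unsigned long *bitmap_alloc_zero(unsigned int nbits)
{
	size_t longs = (nbits + CHAR_BIT * sizeof(long) - 1) /
		       (CHAR_BIT * sizeof(long));
	return calloc(longs, sizeof(long));
}

int main(void)
{
	unsigned int max_pf_txqs = 768; /* device-reported, not compile-time */
	unsigned long *avail_txqs = bitmap_alloc_zero(max_pf_txqs);

	if (!avail_txqs)
		return 1;
	avail_txqs[0] |= 1UL << 5; /* mark queue 5 in use */
	printf("queue 5 busy: %lu\n", (avail_txqs[0] >> 5) & 1);
	free(avail_txqs);
	return 0;
}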
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 765e3c2ed045..bf9aa533a7c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1610,6 +1610,7 @@ enum ice_aq_err {
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
};
/* Admin Queue command opcodes */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2e0731c1e1a3..302ad981129c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -263,21 +263,23 @@ enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
- struct ice_link_status *hw_link_info_old, *hw_link_info;
struct ice_aqc_get_link_status_data link_data = { 0 };
struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
u16 cmd_flags;
if (!pi)
return ICE_ERR_PARAM;
- hw_link_info_old = &pi->phy.link_info_old;