Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 497
-rw-r--r--  drivers/net/dsa/microchip/Kconfig | 16
-rw-r--r--  drivers/net/dsa/microchip/Makefile | 5
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 1316
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h (renamed from drivers/net/dsa/microchip/ksz_9477_reg.h) | 17
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 177
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 1183
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 214
-rw-r--r--  drivers/net/dsa/microchip/ksz_priv.h | 245
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 217
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.h | 69
-rw-r--r--  drivers/net/dsa/mt7530.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 2
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 41
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mdio.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 876
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.h | 36
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 55
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 162
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 109
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 48
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 135
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 58
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 83
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 65
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 105
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 71
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 62
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 71
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 452
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 28
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 265
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 502
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 31
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 70
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 487
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 23
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 753
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 154
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 614
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 18
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 55
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 14
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 30
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 10
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 26
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.h | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 59
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 25
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 67
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 223
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 152
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 75
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 65
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 48
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 108
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 35
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 280
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 971
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 180
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 133
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 525
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 1383
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 214
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 169
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 454
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 256
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 30
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 3
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/cls.c | 283
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 370
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c | 344
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h | 206
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/qdisc.c | 850
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 164
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 51
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 41
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 243
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 62
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 32
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 3
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 65
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 13
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 388
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 23
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 60
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 5
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 238
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 237
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 32
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 38
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 32
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 29
-rw-r--r--  drivers/net/geneve.c | 111
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 9
-rw-r--r--  drivers/net/netdevsim/bpf.c | 52
-rw-r--r--  drivers/net/phy/amd.c | 1
-rw-r--r--  drivers/net/phy/aquantia.c | 15
-rw-r--r--  drivers/net/phy/at803x.c | 3
-rw-r--r--  drivers/net/phy/bcm63xx.c | 6
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/bcm87xx.c | 10
-rw-r--r--  drivers/net/phy/broadcom.c | 16
-rw-r--r--  drivers/net/phy/cicada.c | 2
-rw-r--r--  drivers/net/phy/davicom.c | 4
-rw-r--r--  drivers/net/phy/dp83640.c | 1
-rw-r--r--  drivers/net/phy/dp83822.c | 1
-rw-r--r--  drivers/net/phy/dp83848.c | 1
-rw-r--r--  drivers/net/phy/dp83867.c | 1
-rw-r--r--  drivers/net/phy/dp83tc811.c | 1
-rw-r--r--  drivers/net/phy/fixed_phy.c | 19
-rw-r--r--  drivers/net/phy/icplus.c | 145
-rw-r--r--  drivers/net/phy/intel-xway.c | 10
-rw-r--r--  drivers/net/phy/lxt.c | 6
-rw-r--r--  drivers/net/phy/marvell.c | 93
-rw-r--r--  drivers/net/phy/marvell10g.c | 37
-rw-r--r--  drivers/net/phy/meson-gxl.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 31
-rw-r--r--  drivers/net/phy/microchip.c | 1
-rw-r--r--  drivers/net/phy/microchip_t1.c | 1
-rw-r--r--  drivers/net/phy/mscc.c | 6
-rw-r--r--  drivers/net/phy/national.c | 1
-rw-r--r--  drivers/net/phy/phy-c45.c | 12
-rw-r--r--  drivers/net/phy/phy-core.c | 213
-rw-r--r--  drivers/net/phy/phy.c | 443
-rw-r--r--  drivers/net/phy/phy_device.c | 192
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 15
-rw-r--r--  drivers/net/phy/phylink.c | 19
-rw-r--r--  drivers/net/phy/qsemi.c | 1
-rw-r--r--  drivers/net/phy/realtek.c | 45
-rw-r--r--  drivers/net/phy/smsc.c | 7
-rw-r--r--  drivers/net/phy/ste10Xp.c | 2
-rw-r--r--  drivers/net/phy/uPD60620.c | 6
-rw-r--r--  drivers/net/phy/vitesse.c | 21
-rw-r--r--  drivers/net/tun.c | 49
-rw-r--r--  drivers/net/usb/Kconfig | 12
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/aqc111.c | 1457
-rw-r--r--  drivers/net/usb/aqc111.h | 232
-rw-r--r--  drivers/net/usb/cdc_ether.c | 26
-rw-r--r--  drivers/net/usb/lan78xx.c | 27
-rw-r--r--  drivers/net/usb/smsc95xx.c | 55
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/vrf.c | 19
-rw-r--r--  drivers/net/vxlan.c | 252
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 137
-rw-r--r--  drivers/net/xen-netfront.c | 2
309 files changed, 19032 insertions(+), 6117 deletions(-)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ed6828821fbd..80af658e530d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -207,7 +207,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
return PTR_ERR(peer_net);
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
- &vxcan_link_ops, tbp);
+ &vxcan_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(peer_net);
return PTR_ERR(peer);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2eb68769562c..aa4a1f5206f1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
return ret;
}
+ ret = bcm_sf2_cfp_resume(ds);
+ if (ret)
+ return ret;
+
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
@@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
mutex_init(&priv->cfp.lock);
+ INIT_LIST_HEAD(&priv->cfp.rules_list);
/* CFP rule #0 cannot be used for specific classifications, flag it as
* permanently used
@@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, false);
+
ret = bcm_sf2_cfp_rst(priv);
if (ret) {
pr_err("failed to reset CFP\n");
@@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
priv->wol_ports_mask = 0;
dsa_unregister_switch(priv->dev->ds);
+ bcm_sf2_cfp_exit(priv->dev->ds);
/* Disable all ports and interrupts */
bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index cc31e986e6e3..faaef320ec48 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv {
DECLARE_BITMAP(used, CFP_NUM_RULES);
DECLARE_BITMAP(unique, CFP_NUM_RULES);
unsigned int rules_cnt;
+ struct list_head rules_list;
};
struct bcm_sf2_priv {
@@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..e14663ab6dbc 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
core_writel(priv, reg, offset);
}
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+ int port, u32 location)
+{
+ struct cfp_rule *rule = NULL;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ if (rule->port == port && rule->fs.location == location)
+ break;
+ }
+
+ return rule;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
@@ -728,27 +789,14 @@ out_err:
return ret;
}
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
- struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
- int ret = -EINVAL;
-
- /* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
- return -EINVAL;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- test_bit(fs->location, priv->cfp.used))
- return -EBUSY;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
+ int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
queue_num, fs);
break;
default:
+ ret = -EINVAL;
break;
}
return ret;
}
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule = NULL;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+ return ret;
+}
+
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
u32 loc, u32 *next_loc)
{
@@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
return 0;
}
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
- u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
{
u32 next_loc = 0;
int ret;
- /* Refuse deleting unused rules, and those that are not unique since
- * that could leave IPv6 rules with one of the chained rule in the
- * table.
- */
- if (!test_bit(loc, priv->cfp.unique) || loc == 0)
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
@@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
return ret;
}
-static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(flow->m_u); i++)
- flow->m_u.hdata[i] ^= 0xff;
-
- flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
- flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
- flow->m_ext.data[0] ^= cpu_to_be32(~0);
- flow->m_ext.data[1] ^= cpu_to_be32(~0);
-}
-
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
- bool mask)
-{
- u32 reg, offset, ipv4;
- u16 src_dst_port;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
-
- reg = core_readl(priv, offset);
- /* src port [15:8] */
- src_dst_port = reg << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
-
- reg = core_readl(priv, offset);
- /* src port [7:0] */
- src_dst_port |= (reg >> 24);
-
- v4_spec->pdst = cpu_to_be16(src_dst_port);
- v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
- /* IPv4 dst [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
-
- reg = core_readl(priv, offset);
- /* IPv4 dst [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- /* IPv4 dst [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- v4_spec->ip4dst = cpu_to_be32(ipv4);
-
- /* IPv4 src [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
+ struct cfp_rule *rule;
+ int ret;
- /* Once the TCAM is programmed, the mask reflects the slice number
- * being matched, don't bother checking it when reading back the
- * mask spec
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
*/
- if (!mask && !(reg & SLICE_VALID))
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
- /* IPv4 src [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- /* IPv4 src [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- v4_spec->ip4src = cpu_to_be32(ipv4);
-
- return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs)
-{
- struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
- u32 reg;
- int ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V4_FLOW;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V4_FLOW;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
- break;
- default:
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
return -EINVAL;
- }
-
- fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
- v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
- ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
- if (ret)
- return ret;
-
- return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
- __be32 *ip6_addr, __be16 *port,
- bool mask)
-{
- u32 reg, tmp, offset;
-
- /* C-Tag [31:24]
- * UDF_n_B8 [23:8] (port)
- * UDF_n_B7 (upper) [7:0] (addr[15:8])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(4);
- else
- offset = CORE_CFP_DATA_PORT(4);
- reg = core_readl(priv, offset);
- *port = cpu_to_be32(reg) >> 8;
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B7 (lower) [31:24] (addr[7:0])
- * UDF_n_B6 [23:8] (addr[31:16])
- * UDF_n_B5 (upper) [7:0] (addr[47:40])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[3] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B5 (lower) [31:24] (addr[39:32])
- * UDF_n_B4 [23:8] (addr[63:48])
- * UDF_n_B3 (upper) [7:0] (addr[79:72])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[2] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
- /* UDF_n_B3 (lower) [31:24] (addr[71:64])
- * UDF_n_B2 [23:8] (addr[95:80])
- * UDF_n_B1 (upper) [7:0] (addr[111:104])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[1] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
+ ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
- /* UDF_n_B1 (lower) [31:24] (addr[103:96])
- * UDF_n_B0 [23:8] (addr[127:112])
- * Reserved [7:4]
- * Slice ID [3:2]
- * Slice valid [1:0]
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[0] = cpu_to_be32(tmp);
+ list_del(&rule->next);
+ kfree(rule);
- if (!mask && !(reg & SLICE_VALID))
- return -EINVAL;
-
- return 0;
+ return ret;
}
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs,
- u32 next_loc)
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
- struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
- u32 reg;
- int ret;
-
- /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
- * assuming tcp_ip6_spec here being an union.
- */
- v6_spec = &fs->h_u.tcp_ip6_spec;
- v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
- /* Read the second half first */
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
- false);
- if (ret)
- return ret;
-
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
- &v6_m_spec->pdst, true);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations. We would not have the port enabled for this rule, so
- * don't bother checking it.
- */
- (void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
- /* The slice number is valid, so read the rule we are chained from now
- * which is our first half.
- */
- bcm_sf2_cfp_rule_addr_set(priv, next_loc);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V6_FLOW;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V6_FLOW;
- break;
- default:
- return -EINVAL;
- }
+ unsigned int i;
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
- false);
- if (ret)
- return ret;
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
- return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
- &v6_m_spec->psrc, true);
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
struct ethtool_rxnfc *nfc)
{
- u32 reg, ipv4_or_chain_id;
- unsigned int queue_num;
- int ret;
-
- bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- /* Extract the destination port */
- nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
- DST_MAP_IB_MASK) - 1;
-
- /* There is no Port 6, so we compensate for that here */
- if (nfc->fs.ring_cookie >= 6)
- nfc->fs.ring_cookie++;
- nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
- /* Extract the destination queue */
- queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
- nfc->fs.ring_cookie += queue_num;
-
- /* Extract the L3_FRAMING or CHAIN_ID */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ struct cfp_rule *rule;
- /* With IPv6 rules this would contain a non-zero chain ID since
- * we reserve entry 0 and it cannot be used. So if we read 0 here
- * this means an IPv4 rule.
- */
- ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
- if (ipv4_or_chain_id == 0)
- ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
- else
- ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
- ipv4_or_chain_id);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations
- */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
- if (!(reg & 1 << port))
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
return -EINVAL;
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
bcm_sf2_invert_masks(&nfc->fs);
/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
return 0;
}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule;
+ int ret = 0;
+ u32 reg;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg &= ~CFP_EN_MAP_MASK;
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+ rule->fs.location);
+ if (ret) {
+ dev_err(ds->dev, "failed to remove rule\n");
+ return ret;
+ }
+
+ ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+ if (ret) {
+ dev_err(ds->dev, "failed to restore rule\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index a8b8f59099ce..a8caf9249d50 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,12 +1,16 @@
-menuconfig MICROCHIP_KSZ
- tristate "Microchip KSZ series switch support"
+config NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate
+
+menuconfig NET_DSA_MICROCHIP_KSZ9477
+ tristate "Microchip KSZ9477 series switch support"
depends on NET_DSA
select NET_DSA_TAG_KSZ
+ select NET_DSA_MICROCHIP_KSZ_COMMON
help
- This driver adds support for Microchip KSZ switch chips.
+ This driver adds support for Microchip KSZ9477 switch chips.
-config MICROCHIP_KSZ_SPI_DRIVER
- tristate "KSZ series SPI connected switch driver"
- depends on MICROCHIP_KSZ && SPI
+config NET_DSA_MICROCHIP_KSZ9477_SPI
+ tristate "KSZ9477 series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
help
Select to enable support for registering switches configured through SPI.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index ed335e29fae8..3142c18b8f57 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_MICROCHIP_KSZ) += ksz_common.o
-obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER) += ksz_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
new file mode 100644
index 000000000000..0684657fbf9a
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 switch driver main logic
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_priv.h"
+#include "ksz_common.h"
+#include "ksz9477_reg.h"
+
+static const struct {
+ int index;
+ char string[ETH_GSTRING_LEN];
+} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+ u32 data;
+
+ ksz_read32(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write32(dev, addr, data);
+}
+
+static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
+ u32 bits, bool set)
+{
+ u32 addr;
+ u32 data;
+
+ addr = PORT_CTRL_ADDR(port, offset);
+ ksz_read32(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write32(dev, addr, data);
+}
+
+static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u8 data;
+
+ do {
+ ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read vlan table\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to write vlan table\n");
+ goto exit;
+ }
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+ /* update vlan cache table */
+ dev->vlan_cache[vid].table[0] = vlan_table[0];
+ dev->vlan_cache[vid].table[1] = vlan_table[1];
+ dev->vlan_cache[vid].table[2] = vlan_table[2];
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
+}
+
+static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
+ ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
+ ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
+ ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
+}
+
+static int ksz9477_wait_alu_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz9477_reset_switch(struct ksz_device *dev)
+{
+ u8 data8;
+ u16 data16;
+ u32 data32;
+
+ /* reset switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+
+ /* turn off SPI DO Edge select */
+ ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+
+ /* default configuration */
+ ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+ data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+ SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+ /* disable interrupts */
+ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
+ ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+
+ /* set broadcast storm protection 10% rate */
+ ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
+ data16 &= ~BROADCAST_STORM_RATE;
+ data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_KSZ;
+}
+
+static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 val = 0xffff;
+
+ /* No real PHY after this. Simulate the PHY.
+ * A fixed PHY can be setup in the device tree, but this function is
+ * still called for that port during initialization.
+ * For RGMII PHY there is no way to access it so the fixed PHY should
+ * be used. For SGMII PHY the supporting code will be added later.
+ */
+ if (addr >= dev->phy_port_cnt) {
+ struct ksz_port *p = &dev->ports[addr];
+
+ switch (reg) {
+ case MII_BMCR:
+ val = 0x1140;
+ break;
+ case MII_BMSR:
+ val = 0x796d;
+ break;
+ case MII_PHYSID1:
+ val = 0x0022;
+ break;
+ case MII_PHYSID2:
+ val = 0x1631;
+ break;
+ case MII_ADVERTISE:
+ val = 0x05e1;
+ break;
+ case MII_LPA:
+ val = 0xc5e1;
+ break;
+ case MII_CTRL1000:
+ val = 0x0700;
+ break;
+ case MII_STAT1000:
+ if (p->phydev.speed == SPEED_1000)
+ val = 0x3800;
+ else
+ val = 0;
+ break;
+ }
+ } else {
+ ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ }
+
+ return val;
+}
+
+static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = ds->priv;
+
+ /* No real PHY after this. */
+ if (addr >= dev->phy_port_cnt)
+ return 0;
+ ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+
+ return 0;
+}
+
+static void ksz9477_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+ u32 data;
+ int timeout;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ data = MIB_COUNTER_READ;
+ data |= ((ksz9477_mib_names[i].index & 0xFF) <<
+ MIB_COUNTER_INDEX_S);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+ timeout = 1000;
+ do {
+ ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ &data);
+ usleep_range(1, 10);
+ if (!(data & MIB_COUNTER_READ))
+ break;
+ } while (timeout-- > 0);
+
+ /* failed to read MIB. get out of loop */
+ if (!timeout) {
+ dev_dbg(dev->dev, "Failed to get MIB\n");
+ break;
+ }
+
+ /* count resets upon read */
+ ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+
+ dev->mib_value[i] += (uint64_t)data;
+ buf[i] = dev->mib_value[i];
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
+ u8 member)
+{
+ ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
+ dev->ports[port].member = member;
+}
+
+static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+ u8 data;
+ int member = -1;
+
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ if (port != dev->cpu_port)
+ member = 0;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ if (port != dev->cpu_port &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+
+ /* This function is also used internally. */
+ if (port == dev->cpu_port)
+ break;
+
+ member = dev->host_mask | p->vid_member;
+
+ /* Port is a member of a bridge. */
+ if (dev->br_member & (1 << port)) {
+ dev->member |= (1 << port);
+ member = dev->member;
+ }
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ if (port != dev->cpu_port &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ p->stp_state = state;
+ if (data & PORT_RX_ENABLE)
+ dev->rx_ports |= (1 << port);
+ else
+ dev->rx_ports &= ~(1 << port);
+ if (data & PORT_TX_ENABLE)
+ dev->tx_ports |= (1 << port);
+ else
+ dev->tx_ports &= ~(1 << port);
+
+ /* Port membership may share register with STP state. */
+ if (member >= 0 && member != p->member)
+ ksz9477_cfg_port_member(dev, port, (u8)member);
+
+ /* Check if forwarding needs to be updated. */
+ if (state != BR_STATE_FORWARDING) {
+ if (dev->br_member & (1 << port))
+ dev->member &= ~(1 << port);
+ }
+
+ /* When topology has changed the function ksz_update_port_member
+ * should be called to modify port forwarding behavior. However
+ * as the offload_fwd_mark indication cannot be reported here
+ * the switch forwarding function is not enabled.
+ */
+}
+
+static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 data;
+
+ ksz_read8(dev, REG_SW_LUE_CTRL_2, &data);
+ data &= ~(SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S);
+ data |= (SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
+ ksz_write8(dev, REG_SW_LUE_CTRL_2, data);
+ if (port < dev->mib_port_cnt) {
+ /* flush individual port */
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ if (!(data & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, port, P_STP_CTRL,
+ data | PORT_LEARN_DISABLE);
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ } else {
+ /* flush all */
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
+ }
+}
+
+static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (flag) {
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, true);
+ ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+ true);
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
+ } else {
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
+ ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+ false);
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, false);
+ }
+
+ return 0;
+}
+
+static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 vlan_table[3];
+ u16 vid;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return;
+ }
+
+ vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
+
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+
+ if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return;
+ }
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ }
+}
+
+static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ u32 vlan_table[3];
+ u16 vid;
+ u16 pvid;
+
+ ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
+
+ vlan_table[2] &= ~BIT(port);
+
+ if (pvid == vid)
+ pvid = 1;
+
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
+
+ if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
+
+ return 0;
+}
+
+static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* find any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ /* read ALU entry */
+ ksz9477_read_table(dev, alu_table);
+
+ /* update ALU entry */
+ alu_table[0] = ALU_V_STATIC_VALID;
+ alu_table[1] |= BIT(port);
+ if (vid)
+ alu_table[1] |= ALU_V_USE_FID;
+ alu_table[2] = (vid << ALU_V_FID_S);
+ alu_table[2] |= ((addr[0] << 8) | addr[1]);
+ alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
+ alu_table[3] |= ((addr[4] << 8) | addr[5]);
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* read any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
+ if (alu_table[0] & ALU_V_STATIC_VALID) {
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+ /* clear forwarding port */
+ alu_table[2] &= ~BIT(port);
+
+ /* if there is no port to forward, clear table */
+ if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+ } else {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
+{
+ alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
+ alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
+ alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
+ alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
+ ALU_V_PRIO_AGE_CNT_M;
+ alu->mstp = alu_table[0] & ALU_V_MSTP_M;
+
+ alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
+ alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
+ alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
+
+ alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
+
+ alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
+ alu->mac[1] = alu_table[2] & 0xFF;
+ alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
+ alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
+ alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
+ alu->mac[5] = alu_table[3] & 0xFF;
+}
+
+static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u32 ksz_data;
+ u32 alu_table[4];
+ struct alu_struct alu;
+ int timeout;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* start ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
+
+ do {
+ timeout = 1000;
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
+ if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (!timeout) {
+ dev_dbg(dev->dev, "Failed to search ALU\n");
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ /* read ALU table */
+ ksz9477_read_table(dev, alu_table);
+
+ ksz9477_convert_alu(&alu, alu_table);
+
+ if (alu.port_forward & BIT(port)) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ goto exit;
+ }
+ } while (ksz_data & ALU_START);
+
+exit:
+
+ /* stop ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+ /* find empty slot first */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check this has same vid & mac address */
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ } else {
+ /* found empty one */
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->num_statics)
+ goto exit;
+
+ /* add entry */
+ static_table[0] = ALU_V_STATIC_VALID;
+ static_table[1] |= BIT(port);
+ if (mdb->vid)
+ static_table[1] |= ALU_V_USE_FID;
+ static_table[2] = (mdb->vid << ALU_V_FID_S);
+ static_table[2] |= mac_hi;
+ static_table[3] = mac_lo;
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ int ret = 0;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+ /* find empty slot first */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check this has same vid & mac address */
+
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ }
+ }
+
+	/* no matching entry found */
+ if (index == dev->num_statics)
+ goto exit;
+
+ /* clear port */
+ static_table[1] &= ~BIT(port);
+
+ if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
+ /* delete entry */
+ static_table[0] = 0;
+ static_table[1] = 0;
+ static_table[2] = 0;
+ static_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ return 0;
+}
+
+static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ u8 data8;
+ u8 member;
+ u16 data16;
+ struct ksz_port *p = &dev->ports[port];
+
+	/* enable tail tag for host port */
+ if (cpu_port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
+ true);
+
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
+
+ /* set back pressure */
+ ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
+ false);
+ ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
+ MTI_PVID_REPLACE, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (port < dev->phy_port_cnt) {
+ /* do not force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ false);
+
+ } else {
+ /* force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ true);
+
+		/* configure the MAC interface mode and speed */
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+ data8 &= ~PORT_MII_NOT_1GBIT;
+ data8 &= ~PORT_MII_SEL_M;
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= PORT_MII_NOT_1GBIT;
+ data8 |= PORT_MII_SEL;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= PORT_MII_NOT_1GBIT;
+ data8 |= PORT_RMII_SEL;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= PORT_GMII_SEL;
+ p->phydev.speed = SPEED_1000;
+ break;
+ default:
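+			/* RGMII variants: enable the ingress internal delay
+			 * for the -id/-rxid modes and the egress internal
+			 * delay for the -id/-txid modes.
+			 */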
+ data8 &= ~PORT_RGMII_ID_IG_ENABLE;
+ data8 &= ~PORT_RGMII_ID_EG_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ data8 |= PORT_RGMII_ID_IG_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ data8 |= PORT_RGMII_ID_EG_ENABLE;
+ data8 |= PORT_RGMII_SEL;
+ p->phydev.speed = SPEED_1000;
+ break;
+ }
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
+ p->phydev.duplex = 1;
+ }
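+	/* The CPU port is a member of every port; a user port starts out
+	 * forwarding only to the host port plus its own VLAN membership.
+	 */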
+ if (cpu_port) {
+ member = dev->port_mask;
+ dev->on_ports = dev->host_mask;
+ dev->live_ports = dev->host_mask;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ dev->on_ports |= (1 << port);
+
+		/* Link may already have been detected before the port is enabled. */
+ if (p->phydev.link)
+ dev->live_ports |= (1 << port);
+ }
+ ksz9477_cfg_port_member(dev, port, member);
+
+ /* clear pending interrupts */
+ if (port < dev->phy_port_cnt)
+ ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+}
+
+static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ int i;
+
+ ds->num_ports = dev->port_cnt;
+
+ for (i = 0; i < dev->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ dev->cpu_port = i;
+ dev->host_mask = (1 << dev->cpu_port);
+ dev->port_mask |= dev->host_mask;
+
+ /* enable cpu port */
+ ksz9477_port_setup(dev, i, true);
+ p = &dev->ports[dev->cpu_port];
+ p->vid_member = dev->port_mask;
+ p->on = 1;
+ }
+ }
+
+ dev->member = dev->host_mask;
+
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ if (i == dev->cpu_port)
+ continue;
+ p = &dev->ports[i];
+
+		/* Initialize to non-zero so that the cfg_port_member callback
+		 * will be called.
+		 */
+ p->vid_member = (1 << i);
+ p->member = dev->port_mask;
+ ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ p->on = 1;
+ if (i < dev->phy_port_cnt)
+ p->phy = 1;
+ if (dev->chip_id == 0x00947700 && i == 6) {
+ p->sgmii = 1;
+
+ /* SGMII PHY detection code is not implemented yet. */
+ p->phy = 0;
+ }
+ }
+}
+
+static int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+	dev->vlan_cache = devm_kcalloc(dev->dev, dev->num_vlans,
+				       sizeof(struct vlan_table), GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = ksz9477_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+	/* accept packets up to 2000 bytes */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+
+ ksz9477_config_cpu_port(ds);
+
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+
+ /* queue based egress rate limit */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+
+ /* start switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
+
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz9477_switch_ops = {
+ .get_tag_protocol = ksz9477_get_tag_protocol,
+ .setup = ksz9477_setup,
+ .phy_read = ksz9477_phy_read16,
+ .phy_write = ksz9477_phy_write16,
+ .port_enable = ksz_enable_port,
+ .port_disable = ksz_disable_port,
+ .get_strings = ksz9477_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz9477_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz9477_port_vlan_filtering,
+ .port_vlan_prepare = ksz_port_vlan_prepare,
+ .port_vlan_add = ksz9477_port_vlan_add,
+ .port_vlan_del = ksz9477_port_vlan_del,
+ .port_fdb_dump = ksz9477_port_fdb_dump,
+ .port_fdb_add = ksz9477_port_fdb_add,
+ .port_fdb_del = ksz9477_port_fdb_del,
+ .port_mdb_prepare = ksz_port_mdb_prepare,
+ .port_mdb_add = ksz9477_port_mdb_add,
+ .port_mdb_del = ksz9477_port_mdb_del,
+ .port_mirror_add = ksz9477_port_mirror_add,
+ .port_mirror_del = ksz9477_port_mirror_del,
+};
+
+static u32 ksz9477_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+static int ksz9477_switch_detect(struct ksz_device *dev)
+{
+ u8 data8;
+ u32 id32;
+ int ret;
+
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ if (ret)
+ return ret;
+
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ if (ret)
+ return ret;
+
+ /* read chip id */
+ ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+ if (ret)
+ return ret;
+
+ /* Number of ports can be reduced depending on chip. */
+ dev->mib_port_cnt = TOTAL_PORT_NUM;
+ dev->phy_port_cnt = 5;
+
+ dev->chip_id = id32;
+
+ return 0;
+}
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+};
+
+static const struct ksz_chip_data ksz9477_switch_chips[] = {
+ {
+ .chip_id = 0x00947700,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ },
+ {
+ .chip_id = 0x00989700,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ },
+};
+
+static int ksz9477_switch_init(struct ksz_device *dev)
+{
+ int i;
+
+ dev->ds->ops = &ksz9477_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
+
+ if (dev->chip_id == chip->chip_id) {
+ dev->name = chip->dev_name;
+ dev->num_vlans = chip->num_vlans;
+ dev->num_alus = chip->num_alus;
+ dev->num_statics = chip->num_statics;
+ dev->port_cnt = chip->port_cnt;
+ dev->cpu_ports = chip->cpu_ports;
+
+ break;
+ }
+ }
+
+ /* no switch found */
+ if (!dev->port_cnt)
+ return -ENODEV;
+
+ dev->port_mask = (1 << dev->port_cnt) - 1;
+
+ dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
+ dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+
+ i = dev->mib_port_cnt;
+ dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) *
+ (TOTAL_SWITCH_COUNTER_NUM + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+ }
+ dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+
+ return 0;
+}
+
+static void ksz9477_switch_exit(struct ksz_device *dev)
+{
+ ksz9477_reset_switch(dev);
+}
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .shutdown = ksz9477_reset_switch,
+ .detect = ksz9477_switch_detect,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+int ksz9477_switch_register(struct ksz_device *dev)
+{
+ return ksz_switch_register(dev, &ksz9477_dev_ops);
+}
+EXPORT_SYMBOL(ksz9477_switch_register);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 6aa6752035a1..2938e892b631 100644
--- a/drivers/net/dsa/microchip/ksz_9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1,19 +1,8 @@
-/*
- * Microchip KSZ9477 register definitions
- *
- * Copyright (C) 2017
+/* SPDX-License-Identifier: GPL-2.0
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Microchip KSZ9477 register definitions
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
new file mode 100644
index 000000000000..d757ba151cb1
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 series register access through SPI
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_priv.h"
+#include "ksz_spi.h"
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD 3
+#define KS_SPIOP_WR 2
+
+#define SPI_ADDR_SHIFT 24
+#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
+#define SPI_TURNAROUND_SHIFT 5
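+
+/* The helpers below build a 32-bit command word: the opcode ends up in the
+ * top bits, the 24-bit register address in bits 28:5, and the low
+ * SPI_TURNAROUND_SHIFT bits are left as turnaround (dummy) clocks.
+ */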
+
+/* Enough to read all switch port registers. */
+#define SPI_TX_BUF_LEN 0x100
+
+static int ksz9477_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 txbuf;
+ int ret;
+
+ txbuf = reg & SPI_ADDR_MASK;
+ txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
+ txbuf <<= SPI_TURNAROUND_SHIFT;
+ txbuf = cpu_to_be32(txbuf);
+
+ ret = spi_write_then_read(spi, &txbuf, 4, val, len);
+ return ret;
+}
+
+static int ksz9477_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 *txbuf = (u32 *)val;
+
+ *txbuf = reg & SPI_ADDR_MASK;
+ *txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
+ *txbuf <<= SPI_TURNAROUND_SHIFT;
+ *txbuf = cpu_to_be32(*txbuf);
+
+ return spi_write(spi, txbuf, 4 + len);
+}
+
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+
+ return ksz9477_spi_read_reg(spi, reg, data, len);
+}
+
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+
+ if (len > SPI_TX_BUF_LEN)
+ len = SPI_TX_BUF_LEN;
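+	/* The first 4 bytes of the buffer are reserved for the command word
+	 * that ksz9477_spi_write_reg() builds in place; the payload follows.
+	 */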
+ memcpy(&dev->txbuf[4], data, len);
+ return ksz9477_spi_write_reg(spi, reg, dev->txbuf, len);
+}
+
+static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
+ if (!ret) {
+ *val = be32_to_cpu(*val);
+		/* convert to 24-bit */
+ *val >>= 8;
+ }
+
+ return ret;
+}
+
+static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+	/* make it a big-endian 24-bit value, MSB first */
+ value <<= 8;
+ value = cpu_to_be32(value);
+ return ksz_spi_write(dev, reg, &value, 3);
+}
+
+static const struct ksz_io_ops ksz9477_spi_ops = {
+ .read8 = ksz_spi_read8,
+ .read16 = ksz_spi_read16,
+ .read24 = ksz_spi_read24,
+ .read32 = ksz_spi_read32,
+ .write8 = ksz_spi_write8,
+ .write16 = ksz_spi_write16,
+ .write24 = ksz_spi_write24,
+ .write32 = ksz_spi_write32,
+ .get = ksz_spi_get,
+ .set = ksz_spi_set,
+};
+
+static int ksz9477_spi_probe(struct spi_device *spi)
+{
+ struct ksz_device *dev;
+ int ret;
+
+ dev = ksz_switch_alloc(&spi->dev, &ksz9477_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+	dev->txbuf = devm_kzalloc(dev->dev, 4 + SPI_TX_BUF_LEN, GFP_KERNEL);
+	if (!dev->txbuf)
+		return -ENOMEM;
+
+ ret = ksz9477_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int ksz9477_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+
+ return 0;
+}
+
+static void ksz9477_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev && dev->dev_ops->shutdown)
+ dev->dev_ops->shutdown(dev);
+}
+
+static const struct of_device_id ksz9477_dt_ids[] = {
+ { .compatible = "microchip,ksz9477" },
+ { .compatible = "microchip,ksz9897" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+
+static struct spi_driver ksz9477_spi_driver = {
+ .driver = {
+ .name = "ksz9477-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ksz9477_dt_ids),
+ },
+ .probe = ksz9477_spi_probe,
+ .remove = ksz9477_spi_remove,
+ .shutdown = ksz9477_spi_shutdown,
+};
+
+module_spi_driver(ksz9477_spi_driver);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 86b6464b4525..9705808c3af7 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -25,1121 +14,251 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_priv.h"
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
-
-static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
-{
- u8 data;
-
- ksz_read8(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write8(dev, addr, data);
-}
-
-static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
-{
- u32 data;
-
- ksz_read32(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write32(dev, addr, data);
-}
-
-static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
- bool set)
-{
- u32 addr;
- u8 data;
-
- addr = PORT_CTRL_ADDR(port, offset);
- ksz_read8(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write8(dev, addr, data);
-}
-
-static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset,
- u32 bits, bool set)
-{
- u32 addr;
- u32 data;
-
- addr = PORT_CTRL_ADDR(port, offset);
- ksz_read32(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write32(dev, addr, data);
-}
-
-static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
- u8 data;
-
- do {
- ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
- struct ksz_device *dev = ds->priv;
- int ret;
-
- mutex_lock(&dev->vlan_mutex);
-
- ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
- ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
-
- /* wait to be cleared */
- ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read vlan table\n");
- goto exit;
- }
-
- ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
- ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
- ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
-
- ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
-exit:
- mutex_unlock(&dev->vlan_mutex);
-
- return ret;
-}
-
-static int set_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
- struct ksz_device *dev = ds->priv;
- int ret;
-
- mutex_lock(&dev->vlan_mutex);
-
- ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
- ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
- ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
-
- ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
- ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
-
- /* wait to be cleared */
- ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to write vlan table\n");
- goto exit;
- }
-
- ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
- /* update vlan cache table */
- dev->vlan_cache[vid].table[0] = vlan_table[0];
- dev->vlan_cache[vid].table[1] = vlan_table[1];
- dev->vlan_cache[vid].table[2] = vlan_table[2];
-
-exit:
- mutex_unlock(&dev->vlan_mutex);
-
- return ret;
-}
-
-static void read_table(struct dsa_switch *ds, u32 *table)
-{
- struct ksz_device *dev = ds->priv;
-
- ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
- ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
- ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
- ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
-}
-
-static void write_table(struct dsa_switch *ds, u32 *table)
-{
- struct ksz_device *dev = ds->priv;
-
- ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
- ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
- ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
- ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
-}
-
-static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
- u32 data;
-
- do {
- ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout)
+void ksz_update_port_member(struct ksz_device *dev, int port)
{
- u32 data;
-
- do {
- ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int ksz_reset_switch(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
- u8 data8;
- u16 data16;
- u32 data32;
-
- /* reset switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
-
- /* turn off SPI DO Edge select */
- ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
-
- /* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
-
- /* disable interrupts */
- ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
- ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
- ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
-
- /* set broadcast storm protection 10% rate */
- ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
- data16 &= ~BROADCAST_STORM_RATE;
- data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
- ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
-
- return 0;
-}
-
-static void port_setup(struct ksz_device *dev, int port, bool cpu_port)
-{
- u8 data8;
- u16 data16;
-
- /* enable tag tail for host port */
- if (cpu_port)
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
- true);
-
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
-
- /* set back pressure */
- ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
-
- /* set flow control */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
- PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true);
-
- /* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
-
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
- /* replace priority */
- ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
- false);
- ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
- MTI_PVID_REPLACE, false);
-
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- data8 &= ~PORT_MII_NOT_1GBIT;
- data8 &= ~PORT_MII_SEL_M;
- data8 |= PORT_RGMII_SEL;
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
-
- /* clear pending interrupts */
- ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
-}
-
-static void ksz_config_cpu_port(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
int i;
- ds->num_ports = dev->port_cnt;
-
- for (i = 0; i < ds->num_ports; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
- dev->cpu_port = i;
-
- /* enable cpu port */
- port_setup(dev, i, true);
- }
+ for (i = 0; i < dev->port_cnt; i++) {
+ if (i == port || i == dev->cpu_port)
+ continue;
+ p = &dev->ports[i];
+ if (!(dev->member & (1 << i)))
+ continue;
+
+ /* Port is a member of the bridge and is forwarding. */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ p->member != dev->member)
+ dev->dev_ops->cfg_port_member(dev, i, dev->member);
}
}
+EXPORT_SYMBOL_GPL(ksz_update_port_member);
-static int ksz_setup(struct dsa_switch *ds)
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
-
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ u16 val = 0xffff;
- ret = ksz_reset_switch(ds);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
-
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
-
- ksz_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
-
- /* queue based egress rate limit */
- ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
-
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- return 0;
-}
-
-static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
- int port)
-{
- return DSA_TAG_PROTO_KSZ;
-}
-
-static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
-{
- struct ksz_device *dev = ds->priv;
- u16 val = 0;
-
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ dev->dev_ops->r_phy(dev, addr, reg, &val);
return val;
}
+EXPORT_SYMBOL_GPL(ksz_phy_read16);
-static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
-{
- struct ksz_device *dev = ds->priv;
-
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
-
- return 0;
-}
-
-static int ksz_enable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
- /* setup slave port */
- port_setup(dev, port, false);
+ dev->dev_ops->w_phy(dev, addr, reg, val);
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_phy_write16);
-static void ksz_disable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
- /* there is no port disable */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
-}
-
-static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
-{
if (sset != ETH_SS_STATS)
return 0;
- return TOTAL_SWITCH_COUNTER_NUM;
+ return dev->mib_cnt;
}
+EXPORT_SYMBOL_GPL(ksz_sset_count);
-static void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
- ETH_GSTRING_LEN);
- }
-}
-
-static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *buf)
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct ksz_device *dev = ds->priv;
- int i;
- u32 data;
- int timeout;
-
- mutex_lock(&dev->stats_mutex);
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- data = MIB_COUNTER_READ;
- data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S);
- ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
-
- timeout = 1000;
- do {
- ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
- &data);
- usleep_range(1, 10);
- if (!(data & MIB_COUNTER_READ))
- break;
- } while (timeout-- > 0);
-
- /* failed to read MIB. get out of loop */
- if (!timeout) {
- dev_dbg(dev->dev, "Failed to get MIB\n");
- break;
- }
- /* count resets upon read */
- ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+ dev->br_member |= (1 << port);
- dev->mib_value[i] += (uint64_t)data;
- buf[i] = dev->mib_value[i];
- }
-
- mutex_unlock(&dev->stats_mutex);
-}
-
-static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct ksz_device *dev = ds->priv;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
+	/* port_stp_state_set() will be called afterwards to put the port in
+	 * the appropriate state, so there is no need to do anything here.
+	 */
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-static void ksz_port_fast_age(struct dsa_switch *ds, int port)
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct ksz_device *dev = ds->priv;
- u8 data8;
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 |= SW_FAST_AGING;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ dev->br_member &= ~(1 << port);
+ dev->member &= ~(1 << port);
- data8 &= ~SW_FAST_AGING;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+	/* port_stp_state_set() will be called afterwards to put the port in
+	 * the forwarding state, so there is no need to do anything here.
+	 */
}
+EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
+void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (flag) {
- ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
- PORT_VLAN_LOOKUP_VID_0, true);
- ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true);
- ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
- } else {
- ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
- ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false);
- ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
- PORT_VLAN_LOOKUP_VID_0, false);
- }
-
- return 0;
+ dev->dev_ops->flush_dyn_mac_table(dev, port);
}
+EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
/* nothing needed */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
-static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ksz_device *dev = ds->priv;
- u32 vlan_table[3];
- u16 vid;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (get_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return;
- }
-
- vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
- if (untagged)
- vlan_table[1] |= BIT(port);
- else
- vlan_table[1] &= ~BIT(port);
- vlan_table[1] &= ~(BIT(dev->cpu_port));
-
- vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
-
- if (set_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return;
- }
-
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
- }
-}
-
-static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ksz_device *dev = ds->priv;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- u32 vlan_table[3];
- u16 vid;
- u16 pvid;
-
- ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
- pvid = pvid & 0xFFF;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (get_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return -ETIMEDOUT;
- }
-
- vlan_table[2] &= ~BIT(port);
-
- if (pvid == vid)
- pvid = 1;
-
- if (untagged)
- vlan_table[1] &= ~BIT(port);
-
- if (set_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return -ETIMEDOUT;
- }
- }
-
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
-
- return 0;
-}
-
-struct alu_struct {
- /* entry 1 */
- u8 is_static:1;
- u8 is_src_filter:1;
- u8 is_dst_filter:1;
- u8 prio_age:3;
- u32 _reserv_0_1:23;
- u8 mstp:3;
- /* entry 2 */
- u8 is_override:1;
- u8 is_use_fid:1;
- u32 _reserv_1_1:23;
- u8 port_forward:7;
- /* entry 3 & 4*/
- u32 _reserv_2_1:9;
- u8 fid:7;
- u8 mac[ETH_ALEN];
-};
-
-static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct ksz_device *dev = ds->priv;
- u32 alu_table[4];
- u32 data;
- int ret = 0;
-
- mutex_lock(&dev->alu_mutex);
-
- /* find any entry with mac & vid */
- data = vid << ALU_FID_INDEX_S;
- data |= ((addr[0] << 8) | addr[1]);
- ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
- data = ((addr[2] << 24) | (addr[3] << 16));
- data |= ((addr[4] << 8) | addr[5]);
- ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
- /* start read operation */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU\n");
- goto exit;
- }
-
- /* read ALU entry */
- read_table(ds, alu_table);
-
- /* update ALU entry */
- alu_table[0] = ALU_V_STATIC_VALID;
- alu_table[1] |= BIT(port);
- if (vid)
- alu_table[1] |= ALU_V_USE_FID;
- alu_table[2] = (vid << ALU_V_FID_S);
- alu_table[2] |= ((addr[0] << 8) | addr[1]);
- alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
- alu_table[3] |= ((addr[4] << 8) | addr[5]);
-
- write_table(ds, alu_table);
-
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
- mutex_unlock(&dev->alu_mutex);
-
- return ret;
-}
-
-static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct ksz_device *dev = ds->priv;
- u32 alu_table[4];
- u32 data;
- int ret = 0;
-
- mutex_lock(&dev->alu_mutex);
-
- /* read any entry with mac & vid */
- data = vid << ALU_FID_INDEX_S;
- data |= ((addr[0] << 8) | addr[1]);
- ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
- data = ((addr[2] << 24) | (addr[3] << 16));
- data |= ((addr[4] << 8) | addr[5]);
- ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
- /* start read operation */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU\n");
- goto exit;
- }
-
- ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
- if (alu_table[0] & ALU_V_STATIC_VALID) {
- ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
- ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
- ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
-
- /* clear forwarding port */
- alu_table[2] &= ~BIT(port);
-
- /* if there is no port to forward, clear table */
- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
- alu_table[0] = 0;
- alu_table[1] = 0;
- alu_table[2] = 0;
- alu_table[3] = 0;
- }
- } else {
- alu_table[0] = 0;
- alu_table[1] = 0;
- alu_table[2] = 0;
- alu_table[3] = 0;
- }
-
- write_table(ds, alu_table);
-
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
- mutex_unlock(&dev->alu_mutex);
-
- return ret;
-}
-
-static void convert_alu(struct alu_struct *alu, u32 *alu_table)
-{
- alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
- alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
- alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
- alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
- ALU_V_PRIO_AGE_CNT_M;
- alu->mstp = alu_table[0] & ALU_V_MSTP_M;
-
- alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
- alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
- alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
-
- alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
-
- alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
- alu->mac[1] = alu_table[2] & 0xFF;
- alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
- alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
- alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
- alu->mac[5] = alu_table[3] & 0xFF;
-}
-
-static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+ void *data)
{
struct ksz_device *dev = ds->priv;
int ret = 0;
- u32 ksz_data;
- u32 alu_table[4];
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 member;
struct alu_struct alu;
- int timeout;
-
- mutex_lock(&dev->alu_mutex);
-
- /* start ALU search */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
do {
- timeout = 1000;
- do {
- ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
- if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (!timeout) {
- dev_dbg(dev->dev, "Failed to search ALU\n");
- ret = -ETIMEDOUT;
- goto exit;
- }
-
- /* read ALU table */
- read_table(ds, alu_table);
-
- convert_alu(&alu, alu_table);
-
- if (alu.port_forward & BIT(port)) {
+ alu.is_static = false;
+ ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
+ &member, &timestamp,
+ &entries);
+ if (!ret && (member & BIT(port))) {
ret = cb(alu.mac, alu.fid, alu.is_static, data);
if (ret)
- goto exit;
+ break;
}
- } while (ksz_data & ALU_START);
-
-exit:
-
- /* stop ALU search */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
-
- mutex_unlock(&dev->alu_mutex);
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
return ret;
}
+EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
/* nothing to do */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
-static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
- u32 static_table[4];
- u32 data;
+ struct alu_struct alu;
int index;
- u32 mac_hi, mac_lo;
-
- mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
- mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
- mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
-
- mutex_lock(&dev->alu_mutex);
+ int empty = 0;
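+
+	/* empty == 0 means no empty slot seen yet; otherwise it holds the
+	 * first free index + 1.
+	 */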
+ alu.port_forward = 0;
for (index = 0; index < dev->num_statics; index++) {
- /* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
- goto exit;
- }
-
- /* read ALU static table */
- read_table(ds, static_table);
-
- if (static_table[0] & ALU_V_STATIC_VALID) {
- /* check this has same vid & mac address */
- if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
- ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
- (static_table[3] == mac_lo)) {
- /* found matching one */
+ if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
break;
- }
- } else {
- /* found empty one */
- break;
+ /* Remember the first empty entry. */
+ } else if (!empty) {
+ empty = index + 1;
}
}
/* no available entry */
- if (index == dev->num_statics)
- goto exit;
+ if (index == dev->num_statics && !empty)
+ return;
/* add entry */
- static_table[0] = ALU_V_STATIC_VALID;
- static_table[1] |= BIT(port);
- if (mdb->vid)
- static_table[1] |= ALU_V_USE_FID;
- static_table[2] = (mdb->vid << ALU_V_FID_S);
- static_table[2] |= mac_hi;
- static_table[3] = mac_lo;
-
- write_table(ds, static_table);
-
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ if (index == dev->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (mdb->vid) {
+ alu.is_use_fid = true;
-exit:
- mutex_unlock(&dev->alu_mutex);
+ /* Need a way to map VID to FID. */
+ alu.fid = mdb->vid;
+ }
+ dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
- u32 static_table[4];
- u32 data;
+ struct alu_struct alu;
int index;
int ret = 0;
- u32 mac_hi, mac_lo;
-
- mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
- mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
- mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
-
- mutex_lock(&dev->alu_mutex);
for (index = 0; index < dev->num_statics; index++) {
- /* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
- goto exit;
- }
-
- /* read ALU static table */
- read_table(ds, static_table);
-
- if (static_table[0] & ALU_V_STATIC_VALID) {
- /* check this has same vid & mac address */
-
- if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
- ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
- (static_table[3] == mac_lo)) {
- /* found matching one */
+ if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
break;
- }
}
}
/* no available entry */
- if (index == dev->num_statics) {
- ret = -EINVAL;
+ if (index == dev->num_statics)
goto exit;
- }
/* clear port */
- static_table[1] &= ~BIT(port);
-
- if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
- /* delete entry */
- static_table[0] = 0;
- static_table[1] = 0;
- static_table[2] = 0;
- static_table[3] = 0;
- }
-
- write_table(ds, static_table);
-
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ dev->dev_ops->w_sta_mac_table(dev, index, &alu);
exit:
- mutex_unlock(&dev->alu_mutex);
-
return ret;
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
- if (ingress)
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
- else
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
-
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
- /* configure mirror port */
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, true);
+ /* setup slave port */
+ dev->dev_ops->port_setup(dev, port, false);
- ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+	/* port_stp_state_set() will be called afterwards to enable the port,
+	 * so there is no need to do anything here.
+	 */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_enable_port);
-static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
- u8 data;
-
- if (mirror->ingress)
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
- else
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
-
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
-
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, false);
-}
-
-static const struct dsa_switch_ops ksz_switch_ops = {
- .get_tag_protocol = ksz_get_tag_protocol,
- .setup = ksz_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .port_enable = ksz_enable_port,
- .port_disable = ksz_disable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_stp_state_set = ksz_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
- .port_vlan_add = ksz_port_vlan_add,
- .port_vlan_del = ksz_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_fdb_add = ksz_port_fdb_add,
- .port_fdb_del = ksz_port_fdb_del,
- .port_mdb_prepare = ksz_port_mdb_prepare,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz_port_mirror_add,
- .port_mirror_del = ksz_port_mirror_del,
-};
-
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
-};
-
-static const struct ksz_chip_data ksz_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- },
-};
-
-static int ksz_switch_init(struct ksz_device *dev)
-{
- int i;
-
- dev->ds->ops = &ksz_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz_switch_chips[i];
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
-
- break;
- }
- }
+ dev->on_ports &= ~(1 << port);
+ dev->live_ports &= ~(1 << port);
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- return 0;
+	/* port_stp_state_set() will be called afterwards to disable the port,
+	 * so there is no need to do anything here.
+	 */
}
+EXPORT_SYMBOL_GPL(ksz_disable_port);
struct ksz_device *ksz_switch_alloc(struct device *base,
const struct ksz_io_ops *ops,
@@ -1167,34 +286,8 @@ struct ksz_device *ksz_switch_alloc(struct device *base,
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_detect(struct ksz_device *dev)
-{
- u8 data8;
- u32 id32;
- int ret;
-
- /* turn off SPI DO Edge select */
- ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- if (ret)
- return ret;
-
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
- if (ret)
- return ret;
-
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
-
- dev->chip_id = id32;
-
- return 0;
-}
-EXPORT_SYMBOL(ksz_switch_detect);
-
-int ksz_switch_register(struct ksz_device *dev)
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops)
{
int ret;
@@ -1206,19 +299,35 @@ int ksz_switch_register(struct ksz_device *dev)
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- if (ksz_switch_detect(dev))
+ dev->dev_ops = ops;
+
+ if (dev->dev_ops->detect(dev))
return -EINVAL;
- ret = ksz_switch_init(dev);
+ ret = dev->dev_ops->init(dev);
if (ret)
return ret;
- return dsa_register_switch(dev->ds);
+ dev->interface = PHY_INTERFACE_MODE_MII;
+ if (dev->dev->of_node) {
+ ret = of_get_phy_mode(dev->dev->of_node);
+ if (ret >= 0)
+ dev->interface = ret;
+ }
+
+ ret = dsa_register_switch(dev->ds);
+ if (ret) {
+ dev->dev_ops->exit(dev);
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
+ dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
}
EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
new file mode 100644
index 000000000000..2dd832de0d52
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip switch driver common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_COMMON_H
+#define __KSZ_COMMON_H
+
+void ksz_update_port_member(struct ksz_device *dev, int port);
+
+/* Common DSA access functions */
+
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br);
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br);
+void ksz_port_fast_age(struct dsa_switch *ds, int port);
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+ void *data);
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+
+/* Common register access functions */
+
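+/* Every accessor below wraps the underlying bus operation in dev->reg_mutex,
+ * so concurrent callers are serialized and none of these helpers may be used
+ * from atomic context.
+ */
+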
+static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read24(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write24(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_get(struct ksz_device *dev, u32 reg, void *data,
+ size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->get(dev, reg, data, len);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_set(struct ksz_device *dev, u32 reg, void *data,
+ size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->set(dev, reg, data, len);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
+{
+ ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
+{
+ ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
+{
+ ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
+{
+ ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
+{
+ ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ u8 data;
+
+ ksz_read8(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write8(dev, addr, data);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ u32 addr;
+ u8 data;
+
+ addr = dev->dev_ops->get_port_addr(port, offset);
+ ksz_read8(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write8(dev, addr, data);
+}
+
+#endif
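
As a rough, userspace-only sketch (hypothetical names, not driver code), the pattern the ksz_read8/ksz_write8/ksz_cfg helpers above implement looks like this: one mutex serializes every register transaction, and bit set/clear is layered on top as read-modify-write.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dev {
	pthread_mutex_t reg_mutex;	/* plays the role of dev->reg_mutex */
	uint8_t regs[256];		/* stands in for the switch registers */
};

static int fake_read8(struct fake_dev *dev, uint32_t reg, uint8_t *val)
{
	pthread_mutex_lock(&dev->reg_mutex);
	*val = dev->regs[reg & 0xff];
	pthread_mutex_unlock(&dev->reg_mutex);
	return 0;
}

static int fake_write8(struct fake_dev *dev, uint32_t reg, uint8_t val)
{
	pthread_mutex_lock(&dev->reg_mutex);
	dev->regs[reg & 0xff] = val;
	pthread_mutex_unlock(&dev->reg_mutex);
	return 0;
}

/* Read-modify-write on top of the locked accessors, mirroring ksz_cfg().
 * As in the real helper, the read and the write are locked individually,
 * not as one atomic sequence.
 */
static void fake_cfg(struct fake_dev *dev, uint32_t reg, uint8_t bits, int set)
{
	uint8_t data;

	fake_read8(dev, reg, &data);
	if (set)
		data |= bits;
	else
		data &= ~bits;
	fake_write8(dev, reg, data);
}

int main(void)
{
	struct fake_dev dev = { .reg_mutex = PTHREAD_MUTEX_INITIALIZER };

	fake_cfg(&dev, 0x10, 0x03, 1);	/* set two bits */
	fake_cfg(&dev, 0x10, 0x01, 0);	/* clear one of them again */
	printf("reg 0x10 = 0x%02x\n", dev.regs[0x10]);	/* prints 0x02 */
	return 0;
}
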
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
index 2a98dbd51456..a38ff0841ed4 100644
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -1,19 +1,8 @@
-/*
- * Microchip KSZ series switch common definitions
- *
- * Copyright (C) 2017
+/* SPDX-License-Identifier: GPL-2.0
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Microchip KSZ series switch common definitions
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#ifndef __KSZ_PRIV_H
@@ -25,7 +14,7 @@
#include <linux/etherdevice.h>
#include <net/dsa.h>
-#include "ksz_9477_reg.h"
+#include "ksz9477_reg.h"
struct ksz_io_ops;
@@ -33,6 +22,27 @@ struct vlan_table {
u32 table[3];
};
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u64 *counters;
+};
+
+struct ksz_port {
+ u16 member;
+ u16 vid_member;
+ int stp_state;
+ struct phy_device phydev;
+
+ u32 on:1; /* port is not disabled by hardware */
+ u32 phy:1; /* port has a PHY */
+ u32 fiber:1; /* port is fiber */
+ u32 sgmii:1; /* port is SGMII */
+ u32 force:1;
+ u32 link_just_down:1; /* link just went down */
+
+ struct ksz_port_mib mib;
+};
+
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
@@ -43,6 +53,7 @@ struct ksz_device {
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
const struct ksz_io_ops *ops;
+ const struct ksz_dev_ops *dev_ops;
struct device *dev;
@@ -55,11 +66,37 @@ struct ksz_device {
int num_statics;
int cpu_port; /* port connected to CPU */
int cpu_ports; /* port bitmap can be cpu port */
+ int phy_port_cnt;
int port_cnt;
+ int reg_mib_cnt;
+ int mib_cnt;
+ int mib_port_cnt;
+ int last_port; /* ports after this one are not used */
+ phy_interface_t interface;
+ u32 regs_size;
struct vlan_table *vlan_cache;
u64 mib_value[TOTAL_SWITCH_COUNTER_NUM];
+
+ u8 *txbuf;
+
+ struct ksz_port *ports;
+ struct timer_list mib_read_timer;
+ struct work_struct mib_read;
+ unsigned long mib_read_interval;
+ u16 br_member;
+ u16 member;
+ u16 live_ports;
+ u16 on_ports; /* ports enabled by DSA */
+ u16 rx_ports;
+ u16 tx_ports;
+ u16 mirror_rx;
+ u16 mirror_tx;
+ u32 features; /* chip specific features */
+ u32 overrides; /* chip functions set by user */
+ u16 host_mask;
+ u16 port_mask;
};
struct ksz_io_ops {
@@ -71,140 +108,60 @@ struct ksz_io_ops {
int (*write16)(struct ksz_device *dev, u32 reg, u16 value);
int (*write24)(struct ksz_device *dev, u32 reg, u32 value);
int (*write32)(struct ksz_device *dev, u32 reg, u32 value);
- int (*phy_read16)(struct ksz_device *dev, int addr, int reg,
- u16 *value);
- int (*phy_write16)(struct ksz_device *dev, int addr, int reg,
- u16 value);
+ int (*get)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+ int (*set)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+};
+
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4 */
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+ u32 (*get_port_addr)(int port, int offset);
+ void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+ void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+ void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp,
+ u16 *entries);
+ int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt);
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+ int (*detect)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base,
const struct ksz_io_ops *ops, void *priv);
-int ksz_switch_detect(struct ksz_device *dev);
-int ksz_switch_register(struct ksz_device *dev);
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops);
void ksz_switch_remove(struct ksz_device *dev);
-static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read8(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read16(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read24(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read32(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write8(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write16(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write24(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write32(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
-{
- ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
-{
- ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
-{
- ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
-{
- ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
-{
- ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
-{
- ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
+int ksz9477_switch_register(struct ksz_device *dev);
#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
deleted file mode 100644
index 8c1778b42701..000000000000
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Microchip KSZ series register access through SPI
- *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_priv.h"
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD 3
-#define KS_SPIOP_WR 2
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
-#define SPI_TURNAROUND_SHIFT 5
-
-static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 txbuf;
- int ret;
-
- txbuf = reg & SPI_ADDR_MASK;
- txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
- txbuf <<= SPI_TURNAROUND_SHIFT;
- txbuf = cpu_to_be32(txbuf);
-
- ret = spi_write_then_read(spi, &txbuf, 4, val, len);
- return ret;
-}
-
-static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
- unsigned int len)
-{
- struct spi_device *spi = dev->priv;
-
- return ksz_spi_read_reg(spi, reg, data, len);
-}
-
-static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
- return ksz_spi_read(dev, reg, val, 1);
-}
-
-static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
-
- if (!ret)
- *val = be16_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- *val = 0;
- ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
- if (!ret) {
- *val = be32_to_cpu(*val);
- /* convert to 24bit */
- *val >>= 8;
- }
-
- return ret;
-}
-
-static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
-
- if (!ret)
- *val = be32_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 txbuf;
- u8 data[12];
- int i;
-
- txbuf = reg & SPI_ADDR_MASK;
- txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
- txbuf <<= SPI_TURNAROUND_SHIFT;
- txbuf = cpu_to_be32(txbuf);
-
- data[0] = txbuf & 0xFF;
- data[1] = (txbuf & 0xFF00) >> 8;
- data[2] = (txbuf & 0xFF0000) >> 16;
- data[3] = (txbuf & 0xFF000000) >> 24;
- for (i = 0; i < len; i++)
- data[i + 4] = val[i];
-
- return spi_write(spi, &data, 4 + len);
-}
-
-static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
- struct spi_device *spi = dev->priv;
-
- return ksz_spi_write_reg(spi, reg, &value, 1);
-}
-
-static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
- struct spi_device *spi = dev->priv;
-
- value = cpu_to_be16(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2);
-}
-
-static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- struct spi_device *spi = dev->priv;
-
- /* make it to big endian 24bit from MSB */
- value <<= 8;
- value = cpu_to_be32(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3);
-}
-
-static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
- struct spi_device *spi = dev->priv;
-
- value = cpu_to_be32(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4);
-}
-
-static const struct ksz_io_ops ksz_spi_ops = {
- .read8 = ksz_spi_read8,
- .read16 = ksz_spi_read16,
- .read24 = ksz_spi_read24,
- .read32 = ksz_spi_read32,
- .write8 = ksz_spi_write8,
- .write16 = ksz_spi_write16,
- .write24 = ksz_spi_write24,
- .write32 = ksz_spi_write32,
-};
-
-static int ksz_spi_probe(struct spi_device *spi)
-{
- struct ksz_device *dev;
- int ret;
-
- dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi);
- if (!dev)
- return -ENOMEM;
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- ret = ksz_switch_register(dev);
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- return 0;
-}
-
-static const struct of_device_id ksz_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz_dt_ids);
-
-static struct spi_driver ksz_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz_dt_ids),
- },
- .probe = ksz_spi_probe,
- .remove = ksz_spi_remove,
-};
-
-module_spi_driver(ksz_spi_driver);
-
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_spi.h b/drivers/net/dsa/microchip/ksz_spi.h
new file mode 100644
index 000000000000..427811bd60b3
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip KSZ series SPI access common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ_SPI_H
+#define __KSZ_SPI_H
+
+/* Chip dependent SPI access */
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+ unsigned int len);
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+ unsigned int len);
+
+static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ return ksz_spi_read(dev, reg, val, 1);
+}
+
+static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = be16_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = be32_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz_spi_write(dev, reg, &value, 1);
+}
+
+static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ value = cpu_to_be16(value);
+ return ksz_spi_write(dev, reg, &value, 2);
+}
+
+static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ value = cpu_to_be32(value);
+ return ksz_spi_write(dev, reg, &value, 4);
+}
+
+static int ksz_spi_get(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+ return ksz_spi_read(dev, reg, data, len);
+}
+
+static int ksz_spi_set(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+ return ksz_spi_write(dev, reg, data, len);
+}
+
+#endif
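
The removed ksz_spi.c above framed every SPI transfer as a 2/3 write/read opcode above a 24-bit register address, shifted left by 5 turnaround bits and sent big-endian. Below is a minimal userspace sketch of that framing (constants copied from the removed file, helper names hypothetical; the new ksz9477_spi.c is assumed to keep this layout for the KSZ9477, but its contents are not shown here).

#include <stdint.h>
#include <stdio.h>

#define KS_SPIOP_RD		3
#define KS_SPIOP_WR		2
#define SPI_ADDR_SHIFT		24
#define SPI_ADDR_MASK		((1u << SPI_ADDR_SHIFT) - 1)
#define SPI_TURNAROUND_SHIFT	5

/* Build the 32-bit command word: opcode above the register address,
 * then shift the whole thing left by the turnaround bits.
 */
static uint32_t ksz_spi_cmd(uint32_t reg, int read)
{
	uint32_t cmd = reg & SPI_ADDR_MASK;

	cmd |= (read ? KS_SPIOP_RD : KS_SPIOP_WR) << SPI_ADDR_SHIFT;
	return cmd << SPI_TURNAROUND_SHIFT;	/* still host byte order */
}

int main(void)
{
	uint32_t cmd = ksz_spi_cmd(0x0300, 1);	/* read register 0x0300 */
	uint8_t buf[4];
	int i;

	/* emit the most-significant byte first, as cpu_to_be32() would */
	for (i = 0; i < 4; i++)
		buf[i] = (cmd >> (24 - 8 * i)) & 0xff;

	for (i = 0; i < 4; i++)
		printf("%02x ", buf[i]);	/* prints: 60 00 60 00 */
	printf("\n");
	return 0;
}
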
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a5de9bffe5be..74547f43b938 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(
+ phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e05d4eddc935..b603f8d6ee3e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2524,11 +2524,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
mutex_unlock(&chip->reg_lock);
if (reg == MII_PHYSID2) {
- /* Some internal PHYS don't have a model number. Use
- * the mv88e6390 family model number instead.
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (chip->info->family != MV88E6XXX_FAMILY_6165)
+ /* Then there is the 6165 family. It gets its
+ * PHYs correct. But it can also have two
+ * SERDES interfaces in the PHY address
+ * space. And these don't have a model
+ * number. But they are not PHYs, so we don't
+ * want to give them something a PHY driver
+ * will recognise.
+ *
+ * Use the mv88e6390 family model number
+ * instead, for anything which really could be
+ * a PHY.
+ */
+ if (!(val & 0x3f0))
+ val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
}
return err ? err : val;
@@ -3234,6 +3245,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3276,6 +3288,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3291,8 +3304,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3318,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3405,11 +3419,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3710,11 +3724,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3757,11 +3771,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,8 +3791,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index cd7db60a508b..ebd26b6a93e6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
u16 reg;
int err;
- if (mode == PHY_INTERFACE_MODE_NA)
- return 0;
-
if (port != 9 && port != 10)
return -EOPNOTSUPP;
+ /* Default to a slow mode, freeing up SERDES interfaces for
+ * other ports which might use them for SFPs.
+ */
+ if (mode == PHY_INTERFACE_MODE_NA)
+ mode = PHY_INTERFACE_MODE_1000BASEX;
+
switch (mode) {
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
@@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ switch (mode) {
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return mv88e6390x_port_set_cmode(chip, port, mode);
+}
+
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 36904c9bf955..0d81866d0e4a 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index bb69650ff772..2caa8c8b4b55 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -619,15 +619,11 @@ out:
return ret;
}
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
{
int lane;
int err;
- /* Only support ports 9 and 10 at the moment */
- if (port < 9)
- return 0;
-
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane == -ENODEV)
@@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_serdes_irq_enable(chip, port, lane);
}
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return 0;
+
+ return mv88e6390x_serdes_irq_setup(chip, port);
+}
+
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
{
int lane = mv88e6390x_serdes_get_lane(chip, port);
- if (port < 9)
+ if (lane == -ENODEV)
return;
if (lane < 0)
@@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
chip->ports[port].serdes_irq = 0;
}
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return;
+
+ mv88e6390x_serdes_irq_free(chip, port);
+}
+
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
{
u8 cmode = chip->ports[port].cmode;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7870c5a9ef12..573dce8b1eb4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7c9348a26cbb..91fc64c1145e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev)
else
phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported;
+ linkmode_copy(phy->advertising, phy->supported);
greth->link = 0;
greth->speed = 0;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 151bdb629e8a..128cd648ba99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- phy_data->phydev->supported = PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+
+ linkmode_copy(phy_data->phydev->supported, supported);
+
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- phy_data->phydev->supported = (PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+ linkmode_copy(phy_data->phydev->supported, supported);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct phy_device *phydev;
- u32 advertising;
int ret;
/* If we already have a PHY, just return */
@@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
xgbe_phy_external_phy_quirks(pdata);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
- phydev->advertising &= advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ lks->link_modes.advertising);
phy_start_aneg(phy_data->phydev);
@@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
if (phy_data->phydev->pause) {
XGBE_SET_LP_ADV(lks, Pause);
@@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
- u32 advertising;
int ret;
ret = xgbe_phy_find_phy_device(pdata);
@@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return 0;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
-
phy_data->phydev->autoneg = pdata->phy.autoneg;
- phy_data->phydev->advertising = phy_data->phydev->supported &
- advertising;
+ linkmode_and(phy_data->phydev->advertising,
+ phy_data->phydev->supported,
+ lks->link_modes.advertising);
if (pdata->phy.autoneg != AUTONEG_ENABLE) {
phy_data->phydev->speed = pdata->phy.speed;
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index f5fe3bb2e59d..53529cd85162 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- phydev->supported &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_AUI |
- SUPPORTED_MII |
- SUPPORTED_FIBRE |
- SUPPORTED_BNC);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
+
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
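
The greth, xgbe and xgene hunks above all replace u32 advertising/supported masks with linkmode bitmaps. As a minimal userspace sketch, linkmode_andnot() and linkmode_copy() amount to a plain bitmap and-not and copy over unsigned long words (the bit numbers below are made up, not the real ETHTOOL_LINK_MODE_* values).

#include <stdio.h>
#include <string.h>

#define LM_WORDS	4	/* plenty of room for the ~90 link-mode bits */
#define LONG_BITS	(8 * sizeof(unsigned long))

static void lm_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

/* dst = src1 & ~src2, which is what linkmode_andnot() does */
static void lm_andnot(unsigned long *dst, const unsigned long *src1,
		      const unsigned long *src2)
{
	unsigned int i;

	for (i = 0; i < LM_WORDS; i++)
		dst[i] = src1[i] & ~src2[i];
}

int main(void)
{
	unsigned long supported[LM_WORDS] = { 0 };
	unsigned long mask[LM_WORDS] = { 0 };
	unsigned long advertising[LM_WORDS];

	lm_set_bit(supported, 0);	/* pretend: a 10 Mb/s half-duplex mode */
	lm_set_bit(supported, 5);	/* pretend: a 1000 Mb/s full-duplex mode */
	lm_set_bit(mask, 0);		/* modes the MAC cannot actually use */

	lm_andnot(supported, supported, mask);			/* linkmode_andnot() */
	memcpy(advertising, supported, sizeof(advertising));	/* linkmode_copy() */

	printf("advertising word0 = 0x%lx\n", advertising[0]);	/* prints 0x20 */
	return 0;
}
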
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8c9e79..4556630ee286 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578211ed..6b6d1724676e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
-
+#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1daaa4d8..a5fd71692c8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -213,7 +214,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -520,6 +550,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000000..18bc035da850
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4,
+ AQ_RX_LAST_LOC_FL3L4);
+ return -EINVAL;
+ }
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ AQ_RX_LAST_LOC_FETHERT);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: Please, to enable the RX flow control:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified tos tclass are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified number %u rule is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+ "Maximum allowable value action is %u.\n",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Function rebuilds the array of VLAN filters so that filters with an
+ * assigned queue take precedence over plain VLANs on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove vlan if it was in table without queue assignment */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+ int i;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+ hweight += hweight_long(aq_nic->active_vlans[i]);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight < AQ_VLAN_MAX_FILTERS)
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ /* otherwise it is left in promiscuous mode */
+ }
+
+ return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000000..c6a08c6585d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da358ca..81aab73dc22f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
+struct aq_rx_filter_l3l4;
struct aq_hw_ops {
@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef275eb..2a11c1eefd8f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
+
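+ /* restore rxnfc filter rules from the software list after hw re-init */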
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+err_exit:
return err;
}
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
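+ /* if no explicit ethtool VLAN rule existed for this vid, rebuild
+ * the VLAN filter table from the active_vlans bitmap
+ */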
+ if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
- .ndo_set_features = aq_ndev_set_features
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc0952425..279ea58f4a9e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
- cfg->vlan_id = 0U;
-
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a3d60a..8e34c1e49bf2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
- u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+};
+
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
+ /* Bitmask of currently assigned vlans from linux */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8df855..c8b44cdb91c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f43fe3..6af7d7f0cdca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
- NETIF_F_LRO, \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- if (cfg->vlan_id) {
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
- hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
- hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
- hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
- } else {
- hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- }
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
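+ /* an IPv6 filter spans HW_ATL_RX_CNT_REG_ADDR_IPV6 consecutive
+ * locations; clear each of them
+ */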
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
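+ /* program the L3 addresses only when the filter command is active */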
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
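+ /* a negative queue means the filter is not steered to a specific RX queue */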
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/**
+ * @brief Set VLAN filter table
+ * @details Configure the VLAN filter table to accept traffic (and assign the
+ * RX queue) for the given VLAN IDs.
+ * Note: call this function while VLAN promiscuous mode is enabled so that
+ * traffic is not lost while the table is rewritten.
+ *
+ * @param self Hardware context
+ * @param aq_vlans VLAN filter configuration
+ * @return 0 - OK, <0 - error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
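+ /* queue 0xFF (AQ_RX_QUEUE_NOT_ASSIGNED) means no RX queue
+ * steering for this VLAN
+ */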
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* enable VLAN promiscuous mode when the VLAN filter is disabled */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f0f69..939f77e2e117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+}
+
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[i]);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f239928c15..03c570d115fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
/* rpo */
/* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa317b1c..8470d92db812 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
- */
-
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 7def1cb8ab9d..9b74a3197d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -263,6 +263,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
10, 1000U);
+ if (err)
+ return err;
}
if (self->rbl_enabled)
@@ -454,8 +456,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
(fw.val =
aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
fw.tid), 1000U, 100U);
- if (err < 0)
- goto err_exit;
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -463,8 +463,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca64b58..48278e333462 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
+
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index bd277b0dc615..4406325fdd9f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising &= phy_dev->supported;
+ linkmode_and(phy_dev->advertising, phy_dev->advertising,
+ phy_dev->supported);
priv->last_rx_bd = 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e445ab724827..f44808959ff3 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev)
static int b44_register_phy_one(struct b44 *bp)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
@@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp)
}
/* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0e2d99c737e3..4574275ef445 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
+ unsigned int index;
u32 reg;
/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
rxchk_writel(priv, reg, RXCHK_CONTROL);
+ /* Make sure we restore correct CID index in case HW lost
+ * its context during deep idle state
+ */
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ rxchk_writel(priv, priv->filters_loc[index] <<
+ RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ }
+
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ priv->filters_loc[index] = nfc->fs.location;
set_bit(index, priv->filters);
return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
* be taken care of during suspend time by bcm_sysport_suspend_to_wol
*/
clear_bit(index, priv->filters);
+ priv->filters_loc[index] = 0;
return 0;
}
@@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
struct bcm_sysport_priv *priv;
struct net_device *slave_dev;
unsigned int num_tx_queues;
- unsigned int q, start, port;
+ unsigned int q, qp, port;
struct net_device *dev;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
priv->per_port_num_tx_queues = num_tx_queues;
- start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
- for (q = 0; q < num_tx_queues; q++) {
- ring = &priv->tx_rings[q + start];
+ for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+ q++) {
+ ring = &priv->tx_rings[q];
+
+ if (ring->inspect)
+ continue;
/* Just remember the mapping actual programming done
* during bcm_sysport_init_tx_ring
*/
- ring->switch_queue = q;
+ ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
priv->ring_map[q + port * num_tx_queues] = ring;
+ qp++;
+ }
+
+ return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+ struct dsa_notifier_register_info *info)
+{
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_priv *priv;
+ struct net_device *slave_dev;
+ unsigned int num_tx_queues;
+ struct net_device *dev;
+ unsigned int q, port;
+
+ priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+ if (priv->netdev != info->master)
+ return 0;
+
+ dev = info->master;
+
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return 0;
+
+ port = info->port_number;
+ slave_dev = info->info.dev;
+
+ num_tx_queues = slave_dev->real_num_tx_queues;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
- /* Set all queues as being used now */
- set_bit(q + start, &priv->queue_bitmap);
+ if (ring->switch_port != port)
+ continue;
+
+ if (!ring->inspect)
+ continue;
+
+ ring->inspect = false;
+ priv->ring_map[q + port * num_tx_queues] = NULL;
}
return 0;
@@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct dsa_notifier_register_info *info;
-
- if (event != DSA_PORT_REGISTER)
- return NOTIFY_DONE;
+ int ret = NOTIFY_DONE;
- info = ptr;
+ switch (event) {
+ case DSA_PORT_REGISTER:
+ ret = bcm_sysport_map_queues(nb, ptr);
+ break;
+ case DSA_PORT_UNREGISTER:
+ ret = bcm_sysport_unmap_queues(nb, ptr);
+ break;
+ }
- return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+ return notifier_from_errno(ret);
}
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index a7a230884a87..0887e6356649 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+ u32 filters_loc[RXCHK_BRCM_TAG_MAX];
struct bcm_sysport_stats64 stats64;
@@ -795,7 +796,6 @@ struct bcm_sysport_priv {
/* map information between switch port queues and local queues */
struct notifier_block dsa_notifier;
unsigned int per_port_num_tx_queues;
- unsigned long queue_bitmap;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a4a90b6cdb46..749d0ef44371 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1105,11 +1105,39 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
+ char version[ETHTOOL_FWVERS_LEN];
+ int ext_dev_info_offset;
+ u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
+ BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
+ strlcat(info->version, version, sizeof(info->version));
+
+ if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
+ ext_dev_info_offset = SHMEM2_RD(bp,
+ extended_dev_info_shared_addr);
+ mbi = REG_RD(bp, ext_dev_info_offset +
+ offsetof(struct extended_dev_info_shared_cfg,
+ mbi_version));
+ if (mbi) {
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, "mbi %d.%d.%d ",
+ (mbi & 0xff000000) >> 24,
+ (mbi & 0x00ff0000) >> 16,
+ (mbi & 0x0000ff00) >> 8);
+ strlcpy(info->fw_version, version,
+ sizeof(info->fw_version));
+ }
+ }
+
+ memset(version, 0, sizeof(version));
+ bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ strlcat(info->fw_version, version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index f8b810313094..d9057c8bbeef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1140,6 +1140,11 @@ struct shm_dev_info { /* size */
};
+struct extended_dev_info_shared_cfg {
+ u32 reserved[18];
+ u32 mbi_version;
+ u32 mbi_date;
+};
#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d83233ae4a15..510dfc1c236b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
if (realdev) {
dev = cnic_from_netdev(realdev);
if (dev) {
- vid |= VLAN_TAG_PRESENT;
+ vid |= VLAN_CFI_MASK; /* make non-zero */
cnic_rcv_netevent(dev->cnic_priv, event, vid);
cnic_put(dev);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090bf644..983245c0867c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
}
- return 0;
+ return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
- struct net_device *dev = dev_get_drvdata(d);
- struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
-
- bcmgenet_netif_stop(dev);
-
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
-
- /* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
-
- /* Turn off the clocks */
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
@@ -3719,6 +3689,39 @@ out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
+
+static int bcmgenet_suspend(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ clk_prepare_enable(priv->clk_wol);
+ } else if (priv->internal_phy) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ }
+
+ /* Turn off the clocks */
+ clk_disable_unprepare(priv->clk);
+
+ if (ret)
+ bcmgenet_resume(d);
+
+ return ret;
+}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027f0148..57582efa362d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already powered up so skip the rest */
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index a6cbaca37e94..aceb9b7b55bd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if (dev->phydev->supported & PHY_1000BT_FEATURES)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ dev->phydev->supported))
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
@@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 432c3b867084..3b1397af81f7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -66,11 +66,6 @@
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
#define BAR_0 0
#define BAR_2 2
@@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp)
phydev->speed = tp->link_config.speed;
phydev->duplex = tp->link_config.duplex;
phydev->autoneg = tp->link_config.autoneg;
- phydev->advertising = tp->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(
+ phydev->advertising, tp->link_config.advertising);
}
phy_start(phydev);
@@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
do_low_power = false;
if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
struct phy_device *phydev;
- u32 phyid, advertising;
+ u32 phyid;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
@@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->link_config.speed = phydev->speed;
tp->link_config.duplex = phydev->duplex;
tp->link_config.autoneg = phydev->autoneg;
- tp->link_config.advertising = phydev->advertising;
-
- advertising = ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tp->link_config.advertising,
+ phydev->advertising);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising);
if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
- if (tg3_flag(tp, WOL_SPEED_100MB))
- advertising |=
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full;
- else
- advertising |= ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ }
}
- phydev->advertising = advertising;
-
+ linkmode_copy(phydev->advertising, advertising);
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
@@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
}
/* tp->lock must be held */
-static u64 tg3_refclk_read(struct tg3 *tp)
+static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
- u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
- return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+ u64 stamp;
+
+ ptp_read_system_prets(sts);
+ stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+
+ return stamp;
}
/* tp->lock must be held */
@@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
tg3_full_lock(tp, 0);
- ns = tg3_refclk_read(tp);
+ ns = tg3_refclk_read(tp, sts);
ns += tp->ptp_adjust;
tg3_full_unlock(tp);
@@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
- .gettime64 = tg3_ptp_gettime,
+ .gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
@@ -16973,32 +16985,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-#ifdef CONFIG_SPARC
-static int tg3_get_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
- struct pci_dev *pdev = tp->pdev;
- struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
- int len;
-
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
-
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
- return 0;
-}
-#endif
-
static int tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -17006,10 +16992,8 @@ static int tg3_get_device_address(struct tg3 *tp)
int addr_ok = 0;
int err;
-#ifdef CONFIG_SPARC
- if (!tg3_get_macaddr_sparc(tp))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
return 0;
-#endif
if (tg3_flag(tp, IS_SSB_CORE)) {
err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
@@ -17071,13 +17055,8 @@ static int tg3_get_device_address(struct tg3 *tp)
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC
- if (!tg3_get_default_macaddr_sparc(tp))
- return 0;
-#endif
+ if (!is_valid_ether_addr(&dev->dev_addr[0]))
return -EINVAL;
- }
return 0;
}
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 6aeb1045c302..73632b843749 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -277,10 +277,6 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
- if (!clock->ptp_clock) {
- err = -ENODEV;
- goto error_stop;
- }
if (IS_ERR(clock->ptp_clock)) {
err = PTR_ERR(clock->ptp_clock);
goto error_stop;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b3aecf98f2a..5359c1021f42 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Set the mode of the interface, RGMII/MII. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (netdev->phydev->supported &
- (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+ int rgmii_mode =
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ netdev->phydev->supported) |
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ netdev->phydev->supported)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b16f4b3ef4c5..2d1ca920601e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,7 @@ struct adapter_params {
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
u8 fw_caps_support; /* 32-bit Port Capabilities */
bool filter2_wr_support; /* FW support for FILTER2_WR */
+ unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */
/* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is
* used by the Port
@@ -592,6 +593,13 @@ struct port_info {
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
+
+ /* viid and smt fields either returned by fw
+ * or decoded by parsing viid by driver.
+ */
+ u8 vin;
+ u8 vivld;
+ u8 smt_idx;
};
struct dentry;
@@ -1757,7 +1765,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size);
+ unsigned int *rss_size, u8 *vivld, u8 *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
@@ -1783,7 +1791,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, unsigned int naddr,
const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt);
+ int idx, const u8 *addr, bool persist, u8 *smt_idx);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46254cd..7f76ad9e1ad6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -453,7 +453,7 @@ static int link_start(struct net_device *dev)
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- true);
+ &pi->smt_idx);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -1585,28 +1585,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
- * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
- * @chip: chip type
- * @viid: VI id of the given port
- *
- * Return the SMT index for this VI.
- */
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
-{
- /* In T4/T5, SMT contains 256 SMAC entries organized in
- * 128 rows of 2 entries each.
- * In T6, SMT contains 256 SMAC entries in 256 rows.
- * TODO: The below code needs to be updated when we add support
- * for 256 VFs.
- */
- if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
- return ((viid & 0x7f) << 1);
- else
- return (viid & 0x7f);
-}
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
-
-/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -2280,8 +2258,6 @@ static int cxgb_up(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
#endif
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adap->mac_hlist);
return err;
irq_err:
@@ -2303,6 +2279,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@ -2863,7 +2840,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true, true);
+ pi->xact_addr_filt, addr->sa_data, true,
+ &pi->smt_idx);
if (ret < 0)
return ret;
@@ -4467,6 +4445,15 @@ static int adap_init0(struct adapter *adap)
adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
}
+ /* Check if FW supports returning vin and smt index.
+ * If this is not supported, driver will interpret
+ * these values from viid.
+ */
+ params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4777,14 +4764,26 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
for_each_port(adap, i) {
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
+ u8 vivld = 0, vin = 0;
- ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
- NULL, NULL);
+ ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
+ NULL, NULL, &vivld, &vin);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
- p->viid = ret;
- p->xact_addr_filt = -1;
+ pi->viid = ret;
+ pi->xact_addr_filt = -1;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adap->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
}
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
@@ -5621,6 +5620,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(is_t5(adapter->params.chip) ? STATMODE_V(0) :
T6_STATMODE_V(0)));
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
for_each_port(adapter, i) {
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
@@ -5899,6 +5901,7 @@ fw_attach_fail:
static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
if (!adapter) {
pci_release_regions(pdev);
@@ -5948,6 +5951,12 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
free_some_resources(adapter);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 99022c0898b5..4852febbfec3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -495,14 +495,11 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
- u32 viid = cxgb4_port_viid(dev);
- u32 vf = FW_VIID_VIN_G(viid);
- u32 pf = FW_VIID_PFN_G(viid);
- u32 vld = FW_VIID_VIVLD_G(viid);
-
- ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
- FT_VNID_ID_PF_V(pf) |
- FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+ struct port_info *pi = (struct port_info *)netdev_priv(dev);
+
+ ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
+ FT_VNID_ID_PF_V(adap->pf) |
+ FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
}
return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cb523949c812..e8c34292a0ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -5880,7 +5880,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
{
int i, ofst = idx * 4;
u32 data_reg, mask_reg, cfg;
- u32 multitrc = TRCMULTIFILTER_F;
if (!enable) {
t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
@@ -5900,7 +5899,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
* maximum packet capture size of 9600 bytes is recommended.
* Also in this mode, only trace0 can be enabled and running.
*/
- multitrc = 0;
if (tp->snap_len > 9600 || idx)
return -EINVAL;
}
@@ -7141,21 +7139,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
- unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
- HOSTPAGESIZEPF0_V(sge_hps) |
- HOSTPAGESIZEPF1_V(sge_hps) |
- HOSTPAGESIZEPF2_V(sge_hps) |
- HOSTPAGESIZEPF3_V(sge_hps) |
- HOSTPAGESIZEPF4_V(sge_hps) |
- HOSTPAGESIZEPF5_V(sge_hps) |
- HOSTPAGESIZEPF6_V(sge_hps) |
- HOSTPAGESIZEPF7_V(sge_hps));
-
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -7488,7 +7475,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size)
+ unsigned int *rss_size, u8 *vivld, u8 *vin)
{
int ret;
struct fw_vi_cmd c;
@@ -7523,6 +7510,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+
+ if (vivld)
+ *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
+
+ if (vin)
+ *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
+
return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
@@ -7980,7 +7974,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
* MAC value.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt)
+ int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
int ret, mode;
struct fw_vi_mac_cmd c;
@@ -7989,7 +7983,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
- mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+ mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
@@ -8006,6 +8000,23 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
+ if (smt_idx) {
+ if (adap->params.viid_smt_extn_support) {
+ *smt_idx = FW_VI_MAC_CMD_SMTID_G
+ (be32_to_cpu(c.op_to_viid));
+ } else {
+ /* In T4/T5, SMT contains 256 SMAC entries
+ * organized in 128 rows of 2 entries each.
+ * In T6, SMT contains 256 SMAC entries in
+ * 256 rows.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
+ CHELSIO_T5)
+ *smt_idx = (viid & FW_VIID_VIN_M) << 1;
+ else
+ *smt_idx = (viid & FW_VIID_VIN_M);
+ }
+ }
}
return ret;
}
@@ -8593,7 +8604,7 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
struct fw_port_cmd port_cmd;
- unsigned int action, link_ok, speed, mtu;
+ unsigned int action, link_ok, mtu;
fw_port_cap32_t linkattr;
int ret;
@@ -8627,7 +8638,6 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
mtu = FW_PORT_CMD_MTU32_G(
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- speed = fwcap_to_speed(linkattr);
*link_okp = link_ok;
*speedp = fwcap_to_speed(linkattr);
@@ -9374,6 +9384,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
enum fw_port_type port_type;
int mdio_addr;
fw_port_cap32_t pcaps, acaps;
+ u8 vivld = 0, vin = 0;
int ret;
/* If we haven't yet determined whether we're talking to Firmware
@@ -9428,7 +9439,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
acaps = be32_to_cpu(cmd.u.info32.acaps32);
}
- ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
+ &vivld, &vin);
if (ret < 0)
return ret;
@@ -9437,6 +9449,18 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->lport = port;
pi->rss_size = rss_size;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adapter->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
+
pi->port_type = port_type;
pi->mdio_addr = mdio_addr;
pi->mod_type = FW_PORT_MOD_TYPE_NA;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60df66f4d21c..bf7325f6d553 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57584ab32043..1d9b3e1e5f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1253,6 +1253,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
};
/*
@@ -2109,6 +2110,19 @@ struct fw_vi_cmd {
#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S)
#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U)
+#define FW_VI_CMD_VFVLD_S 24
+#define FW_VI_CMD_VFVLD_M 0x1
+#define FW_VI_CMD_VFVLD_V(x) ((x) << FW_VI_CMD_VFVLD_S)
+#define FW_VI_CMD_VFVLD_G(x) \
+ (((x) >> FW_VI_CMD_VFVLD_S) & FW_VI_CMD_VFVLD_M)
+#define FW_VI_CMD_VFVLD_F FW_VI_CMD_VFVLD_V(1U)
+
+#define FW_VI_CMD_VIN_S 16
+#define FW_VI_CMD_VIN_M 0xff
+#define FW_VI_CMD_VIN_V(x) ((x) << FW_VI_CMD_VIN_S)
+#define FW_VI_CMD_VIN_G(x) \
+ (((x) >> FW_VI_CMD_VIN_S) & FW_VI_CMD_VIN_M)
+
#define FW_VI_CMD_VIID_S 0
#define FW_VI_CMD_VIID_M 0xfff
#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S)
@@ -2182,6 +2196,12 @@ struct fw_vi_mac_cmd {
} u;
};
+#define FW_VI_MAC_CMD_SMTID_S 12
+#define FW_VI_MAC_CMD_SMTID_M 0xff
+#define FW_VI_MAC_CMD_SMTID_V(x) ((x) << FW_VI_MAC_CMD_SMTID_S)
+#define FW_VI_MAC_CMD_SMTID_G(x) \
+ (((x) >> FW_VI_MAC_CMD_SMTID_S) & FW_VI_MAC_CMD_SMTID_M)
+
#define FW_VI_MAC_CMD_VIID_S 0
#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S)
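The FW_VI_CMD_VFVLD/FW_VI_CMD_VIN and FW_VI_MAC_CMD_SMTID getters added above follow the driver's usual shift-and-mask pattern, and the legacy fallback in t4_change_mac() derives the SMT index from the low VIN bits of the VIID (doubled on T4/T5, which pack two SMAC entries per row). The sketch below is illustrative only and not part of the patch; it uses locally defined copies of the field positions so it compiles standalone, the VIID layout is an assumption for demonstration, and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* local copies of the field positions from the hunk above */
#define EX_FW_VI_CMD_VFVLD_S	24
#define EX_FW_VI_CMD_VFVLD_M	0x1
#define EX_FW_VI_CMD_VIN_S	16
#define EX_FW_VI_CMD_VIN_M	0xff
#define EX_FW_VIID_VIN_M	0x7f	/* low bits of a legacy VIID (assumption) */

static uint8_t ex_get_vfvld(uint32_t alloc_to_len16)
{
	return (alloc_to_len16 >> EX_FW_VI_CMD_VFVLD_S) & EX_FW_VI_CMD_VFVLD_M;
}

static uint8_t ex_get_vin(uint32_t alloc_to_len16)
{
	return (alloc_to_len16 >> EX_FW_VI_CMD_VIN_S) & EX_FW_VI_CMD_VIN_M;
}

/* legacy fallback when the firmware lacks the VIID/SMT extension:
 * derive the SMT index from the VIN bits of the VIID, doubled on
 * T4/T5 because their SMT packs two SMAC entries per row
 */
static uint8_t ex_legacy_smt_idx(uint16_t viid, int is_t6)
{
	uint8_t vin = viid & EX_FW_VIID_VIN_M;

	return is_t6 ? vin : (uint8_t)(vin << 1);
}

int main(void)
{
	uint32_t alloc_to_len16 = 0x01050000;	/* hypothetical FW reply word */

	printf("vfvld=%u vin=%u\n",
	       (unsigned int)ex_get_vfvld(alloc_to_len16),
	       (unsigned int)ex_get_vin(alloc_to_len16));
	printf("legacy smt_idx: T5=%u T6=%u\n",
	       (unsigned int)ex_legacy_smt_idx(0x42, 0),
	       (unsigned int)ex_legacy_smt_idx(0x42, 1));
	return 0;
}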
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ff84791a0ff8..8a2ad6b387ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -722,6 +722,7 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
+
adapter->flags |= FULL_INIT_DONE;
}
@@ -747,8 +748,6 @@ static int adapter_up(struct adapter *adapter)
enable_rx(adapter);
t4vf_sge_start(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
@@ -3036,6 +3035,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
if (err)
goto err_unmap_bar;
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
/*
* Allocate our "adapter ports" and stitch everything together.
*/
@@ -3287,6 +3289,7 @@ err_disable_device:
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
/*
* Tear down driver state associated with device.
@@ -3337,6 +3340,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
kfree(adapter);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c5ad7a4f4d83..245abf0d19c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -796,7 +796,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
u16 vlan_tag;
vlan_tag = skb_vlan_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_prio = skb_vlan_tag_get_prio(skb);
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
+ bool insert_vlan = false;
u16 vlan_tag = 0;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return skb;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ insert_vlan = true;
+ }
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
- if (!vlan_tag)
+ if (!insert_vlan) {
vlan_tag = adapter->pvid;
+ insert_vlan = true;
+ }
/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
* skip VLAN insertion
*/
BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
- if (vlan_tag) {
+ if (insert_vlan) {
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
/* Insert the outer VLAN, if any */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6e0f47f2c8a3..9510c9d78858 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
static int dpaa_phy_init(struct net_device *net_dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
@@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev)
}
/* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
+ ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 13d6e2272ece..62497119c85f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -529,6 +529,75 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
return 0;
}
+static int dpaa_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct qman_portal *portal;
+ u32 period;
+ u8 thresh;
+
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &period);
+ qman_dqrr_get_ithresh(portal, &thresh);
+
+ c->rx_coalesce_usecs = period;
+ c->rx_max_coalesced_frames = thresh;
+ c->use_adaptive_rx_coalesce = false;
+
+ return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
+ int cpu, res;
+
+ if (c->use_adaptive_rx_coalesce)
+ return -EINVAL;
+
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+ /* save previous values */
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &prev_period);
+ qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+ /* set new values */
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ res = qman_portal_set_iperiod(portal, period);
+ if (res)
+ goto revert_values;
+ res = qman_dqrr_set_ithresh(portal, thresh);
+ if (res) {
+ qman_portal_set_iperiod(portal, prev_period);
+ goto revert_values;
+ }
+ needs_revert[cpu] = true;
+ }
+
+ return 0;
+
+revert_values:
+ /* restore previous values */
+ for_each_cpu(cpu, cpus) {
+ if (!needs_revert[cpu])
+ continue;
+ portal = qman_get_affine_portal(cpu);
+ /* setting the previous values cannot fail, ignore return value */
+ qman_portal_set_iperiod(portal, prev_period);
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
+ return res;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -545,4 +614,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
.get_ts_info = dpaa_get_ts_info,
+ .get_coalesce = dpaa_get_coalesce,
+ .set_coalesce = dpaa_set_coalesce,
};
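The new dpaa_get_coalesce()/dpaa_set_coalesce() hooks map rx_coalesce_usecs to the QMan portal dequeue interrupt period and rx_max_coalesced_frames to the DQRR interrupt threshold, reverting every already-updated portal to the previous values if any per-CPU update fails. As a rough userspace illustration (not part of the patch; the interface name and values are placeholders), the standard ETHTOOL_SCOALESCE ioctl exercises this path, and `ethtool -C <iface> rx-usecs N rx-frames M` does the same from the command line.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
	struct ifreq ifr;
	int fd, ret;

	/* a real tool would issue ETHTOOL_GCOALESCE first and only
	 * modify the two fields this driver honours
	 */
	ec.rx_coalesce_usecs = 64;		/* QMan portal interrupt period */
	ec.rx_max_coalesced_frames = 32;	/* DQRR interrupt threshold */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (char *)&ec;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret)
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return ret ? 1 : 0;
}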
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 88f7acce38dc..be841718eb49 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -13,7 +13,8 @@
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
-
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <net/sock.h>
#include "dpaa2-eth.h"
@@ -86,7 +87,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
skb_free_frag(sg_vaddr);
if (dpaa2_sg_is_final(&sgt[i]))
@@ -144,7 +145,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -199,12 +200,146 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
return skb;
}
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ skb_free_frag(vaddr);
+ }
+}
+
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
+{
+ int err;
+
+ ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+ if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ return;
+
+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ ch->xdp.drop_bufs,
+ ch->xdp.drop_cnt)) == -EBUSY)
+ cpu_relax();
+
+ if (err) {
+ free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ ch->buf_count -= ch->xdp.drop_cnt;
+ }
+
+ ch->xdp.drop_cnt = 0;
+}
+
+static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
+{
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_faead *faead;
+ u32 ctrl, frc;
+ int i, err;
+
+ /* Mark the egress frame hardware annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+ /* Instruct hardware to release the FD buffer directly into
+ * the buffer pool once transmission is completed, instead of
+ * sending a Tx confirmation frame to us
+ */
+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+ faead = dpaa2_get_faead(buf_start, false);
+ faead->ctrl = cpu_to_le32(ctrl);
+ faead->conf_fqid = 0;
+
+ fq = &priv->fq[queue_id];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, 0,
+ fq->tx_qdbin, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ return err;
+}
+
+static u32 run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ rcu_read_lock();
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
+ if (err) {
+ xdp_release_buf(priv, ch, addr);
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.xdp_tx++;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ case XDP_DROP:
+ xdp_release_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return xdp_act;
+}
+
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id)
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -216,12 +351,14 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_fas *fas;
void *buf_data;
u32 status = 0;
+ u32 xdp_act;
/* Tracing point */
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
prefetch(fas);
@@ -232,8 +369,21 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
+ xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
percpu_extras->rx_sg_frames++;
@@ -267,12 +417,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, queue_id);
+ skb_record_rx_queue(skb, fq->flowid);
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(napi, skb);
+ napi_gro_receive(&ch->napi, skb);
return;
@@ -289,7 +439,7 @@ err_frame_format:
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
static int consume_frames(struct dpaa2_eth_channel *ch,
- enum dpaa2_eth_fq_type *type)
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -312,7 +462,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ fq->consume(priv, ch, fd, fq);
cleaned++;
} while (!is_last);
@@ -320,13 +470,12 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
- ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
- * into the store. Return the frame queue type as an out param.
+ * into the store. Return the frame queue as an out param.
*/
- if (type)
- *type = fq->type;
+ if (src)
+ *src = fq;
return cleaned;
}
@@ -571,8 +720,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
u16 queue_mapping;
unsigned int needed_headroom;
+ u32 fd_len;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -644,8 +795,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, &fd);
} else {
+ fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ percpu_stats->tx_bytes += fd_len;
+
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@@ -661,11 +816,11 @@ err_alloc_headroom:
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused,
- u16 queue_id __always_unused)
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
u32 fd_errors;
/* Tracing point */
@@ -673,7 +828,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+ percpu_extras->tx_conf_bytes += fd_len;
+
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -735,23 +893,6 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-/* Free buffers acquired from the buffer pool or which were meant to
- * be released in the pool
- */
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
-{
- struct device *dev = priv->net_dev->dev.parent;
- void *vaddr;
- int i;
-
- for (i = 0; i < count; i++) {
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- skb_free_frag(vaddr);
- }
-}
-
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -775,7 +916,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
buf = PTR_ALIGN(buf, priv->rx_buf_align);
addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -934,8 +1075,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_channel *ch;
struct dpaa2_eth_priv *priv;
int rx_cleaned = 0, txconf_cleaned = 0;
- enum dpaa2_eth_fq_type type = 0;
- int store_cleaned;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -949,18 +1091,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &type);
- if (type == DPAA2_RX_FQ)
+ store_cleaned = consume_frames(ch, &fq);
+ if (!store_cleaned)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
- else
+ } else {
txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
/* If we either consumed the whole NAPI budget with Rx frames
* or we reached the Tx confirmations threshold, we're done.
*/
if (rx_cleaned >= budget ||
- txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
- return budget;
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ goto out;
+ }
} while (store_cleaned);
/* We didn't consume the entire budget, so finish napi and
@@ -974,7 +1123,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
- return max(rx_cleaned, 1);
+ work_done = max(rx_cleaned, 1);
+
+out:
+ if (txc_fq) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ return work_done;
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1400,6 +1560,174 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
}
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+ int mfl, linear_mfl;
+
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+ dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+ if (mfl > linear_mfl) {
+ netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+ linear_mfl - VLAN_ETH_HLEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+ int mfl, err;
+
+ /* We enforce a maximum Rx frame length based on MTU only if we have
+ * an XDP program attached (in order to avoid Rx S/G frames).
+ * Otherwise, we accept all incoming frames as long as they are not
+ * larger than maximum size supported in hardware
+ */
+ if (has_xdp)
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ else
+ mfl = DPAA2_ETH_MFL;
+
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->xdp_prog)
+ goto out;
+
+ if (!xdp_mtu_valid(priv, new_mtu))
+ return -EINVAL;
+
+ err = set_rx_mfl(priv, new_mtu, true);
+ if (err)
+ return err;
+
+out:
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+ struct dpni_buffer_layout buf_layout = {0};
+ int err;
+
+ err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+ return err;
+ }
+
+ /* Reserve extra headroom for XDP header size changes */
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+ (has_xdp ? XDP_PACKET_HEADROOM : 0);
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch;
+ struct bpf_prog *old;
+ bool up, need_update;
+ int i, err;
+
+ if (prog && !xdp_mtu_valid(priv, dev->mtu))
+ return -EINVAL;
+
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->num_channels);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ up = netif_running(dev);
+ need_update = (!!priv->xdp_prog != !!prog);
+
+ if (up)
+ dpaa2_eth_stop(dev);
+
+ /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
+ * Also, when switching between xdp/non-xdp modes we need to reconfigure
+ * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
+ * so we are sure no old format buffers will be used from now on.
+ */
+ if (need_update) {
+ err = set_rx_mfl(priv, dev->mtu, !!prog);
+ if (err)
+ goto out_err;
+ err = update_rx_buffer_headroom(priv, !!prog);
+ if (err)
+ goto out_err;
+ }
+
+ old = xchg(&priv->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ old = xchg(&ch->xdp.prog, prog);
+ if (old)
+ bpf_prog_put(old);
+ }
+
+ if (up) {
+ err = dpaa2_eth_open(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+out_err:
+ if (prog)
+ bpf_prog_sub(prog, priv->num_channels);
+ if (up)
+ dpaa2_eth_open(dev);
+
+ return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return setup_xdp(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1409,6 +1737,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
.ndo_do_ioctl = dpaa2_eth_ioctl,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_bpf = dpaa2_eth_xdp,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1434,8 +1764,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- dev_info(dev, "Not enough DPCONs, will go on as-is\n");
- return NULL;
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
}
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
@@ -1493,8 +1826,10 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return NULL;
channel->dpcon = setup_dpcon(priv);
- if (!channel->dpcon)
+ if (IS_ERR_OR_NULL(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
goto err_setup;
+ }
err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
&attr);
@@ -1513,7 +1848,7 @@ err_get_attr:
free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
- return NULL;
+ return ERR_PTR(err);
}
static void free_channel(struct dpaa2_eth_priv *priv,
@@ -1547,10 +1882,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
for_each_online_cpu(i) {
/* Try to allocate a channel */
channel = alloc_channel(priv);
- if (!channel) {
- dev_info(dev,
- "No affine channel for cpu %d and above\n", i);
- err = -ENODEV;
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
}
@@ -1597,7 +1933,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
/* Stop if we already have enough channels to accommodate all
* RX and TX conf queues
*/
- if (priv->num_channels == dpaa2_eth_queue_count(priv))
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
break;
}
@@ -1608,9 +1944,12 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
+ if (err == -EPROBE_DEFER)
+ return err;
+
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
- return err;
+ return -ENODEV;
}
dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1732,7 +2071,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
if (err) {
- dev_err(dev, "DPBP device allocation failed\n");
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
return err;
}
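setup_xdp() above takes one bpf_prog reference per channel up front, publishes the program per channel with xchg(), and drops the references held on the old program, so the Rx path only ever observes a fully counted program. A minimal userspace sketch of attaching a program to the interface (which reaches setup_xdp() through .ndo_bpf) is shown below; it assumes the libbpf helpers of this kernel era, and the object file name and interface are placeholders, not part of the patch.

#include <stdio.h>
#include <net/if.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	int prog_fd, ifindex;

	ifindex = if_nametoindex("eth0");	/* placeholder interface */
	if (!ifindex)
		return 1;

	/* load the XDP object file and get a program fd */
	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
		return 1;

	/* attach; the kernel routes this to the driver's .ndo_bpf */
	if (bpf_set_link_xdp_fd(ifindex, prog_fd, 0)) {
		fprintf(stderr, "failed to attach XDP program\n");
		return 1;
	}

	return 0;
}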
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 452a8e9c4f0e..69c965de192b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -139,7 +139,9 @@ struct dpaa2_faead {
};
#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_A4V 0x08000000
#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
/* Accessors for the hardware annotation fields that we use */
@@ -243,12 +245,14 @@ struct dpaa2_eth_fq_stats {
struct dpaa2_eth_ch_stats {
/* Volatile dequeues retried due to portal busy */
__u64 dequeue_portal_busy;
- /* Number of CDANs; useful to estimate avg NAPI len */
- __u64 cdan;
- /* Number of frames received on queues from this channel */
- __u64 frames;
/* Pull errors */
__u64 pull_err;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* XDP counters */
+ __u64 xdp_drop;
+ __u64 xdp_tx;
+ __u64 xdp_tx_err;
};
/* Maximum number of queues associated with a DPNI */
@@ -271,17 +275,24 @@ struct dpaa2_eth_fq {
u32 tx_qdbin;
u16 flowid;
int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
void (*consume)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id);
+ struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
};
+struct dpaa2_eth_ch_xdp {
+ struct bpf_prog *prog;
+ u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int drop_cnt;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -293,6 +304,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_priv *priv;
int buf_count;
struct dpaa2_eth_ch_stats stats;
+ struct dpaa2_eth_ch_xdp xdp;
};
struct dpaa2_eth_dist_fields {
@@ -352,6 +364,7 @@ struct dpaa2_eth_priv {
u64 rx_hash_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
+ struct bpf_prog *xdp_prog;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -434,9 +447,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
DPAA2_ETH_RX_HWA_SIZE;
}
+/* We have exactly one {Rx, Tx conf} queue per channel */
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
- return priv->dpni_attrs.num_queues;
+ return priv->num_channels;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 26bd5a2bd8ed..0c831bffeb92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -45,6 +45,9 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] dequeue portal busy",
"[drv] channel pull errors",
"[drv] cdan",
+ "[drv] xdp drop",
+ "[drv] xdp tx",
+ "[drv] xdp tx errors",
};
#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -174,8 +177,6 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
- u64 cdan = 0;
- u64 portal_busy = 0, pull_err = 0;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
@@ -212,16 +213,12 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
i += j;
- for (j = 0; j < priv->num_channels; j++) {
- ch_stats = &priv->channel[j]->stats;
- cdan += ch_stats->cdan;
- portal_busy += ch_stats->dequeue_portal_busy;
- pull_err += ch_stats->pull_err;
+ /* Per-channel stats */
+ for (k = 0; k < priv->num_channels; k++) {
+ ch_stats = &priv->channel[k]->stats;
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
-
- *(data + i++) = portal_busy;
- *(data + i++) = pull_err;
- *(data + i++) = cdan;
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 84b942b1eccc..9b150db3b510 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
if (err) {
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
goto err_exit;
}
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d79e4e009d63..71f4205f14e7 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
*/
/* get local capabilities */
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
/* get link partner capabilities */
rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 82722d05fedb..88a396fd242f 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
- if (strcmp(tbi->type, "tbi-phy") == 0) {
+ if (of_node_is_type(tbi, "tbi-phy")) {
dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
tbi);
break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3c8da1a18ba0..0e102c764b13 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1784,14 +1784,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
- uint gigabit_support =
- priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
struct phy_device *phydev;
struct ethtool_eee edata;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1809,8 +1815,8 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- phydev->advertising = phydev->supported;
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -3656,7 +3662,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0d76e15cd6dd..241325c35cb4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
prio = vlan_tci_prio(rule);
prio_mask = vlan_tci_priom(rule);
- if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
- vlan |= RQFPR_CFI;
- vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT &&
- cfi_mask == VLAN_TAG_PRESENT) {
+ if (cfi_mask) {
+ if (cfi)
+ vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
}
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e02700feaa..2e978cb8b28c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phy_set_max_speed(phydev, SPEED_100);
-
- if (priv->max_speed == SPEED_1000)
- phydev->supported |= ADVERTISED_1000baseT_Full;
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, priv->max_speed);
priv->phydev = phydev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e907831b0e..c62378c07e70 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
- phy_dev->supported &= h->if_support;
- phy_dev->advertising = phy_dev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 002534f12b66..d01bf536eb86 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326cfda93..4d9cf39da48c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
};
/* below are per-VF mac-vlan subcodes */
@@ -85,6 +88,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b40606dbc..a1707b77c47f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -124,7 +128,10 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
@@ -132,6 +139,11 @@ enum hnae3_reset_type {
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+ HNAE3_FLR_DOWN,
+ HNAE3_FLR_DONE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -162,6 +174,7 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
+ enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +210,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
+ * start_client()
+ * Inform the hclge that client has been started
+ * stop_client()
+ * Inform the hclge that client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
@@ -292,17 +309,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +425,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -429,7 +453,12 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
};
struct hnae3_dcb_ops {
@@ -488,6 +517,14 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
+
+ /* The attributes below are defined for the RoCE client; hnae3
+ * gives them initial values, and the RoCE client can modify and
+ * use them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
};
struct hnae3_unic_private_info {
@@ -520,9 +557,6 @@ struct hnae3_handle {
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
- unsigned long last_reset_time;
- enum hnae3_reset_type reset_level;
-
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
@@ -533,6 +567,7 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a84070d..b6fabbbdfd5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 000000000000..86d667a3730a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+#define HNS3_DBG_READ_LEN 256
+
+static struct dentry *hns3_dbgfs_root;
+
+static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
+ u32 base_add_l, base_add_h;
+ u32 queue_num, queue_max;
+ u32 value, i = 0;
+ int cnt;
+
+ if (!priv->ring_data) {
+ dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ return -EFAULT;
+ }
+
+ queue_max = h->kinfo.num_tqps;
+ cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
+ if (cnt)
+ queue_num = 0;
+ else
+ queue_max = queue_num + 1;
+
+ dev_info(&h->pdev->dev, "queue info\n");
+
+ if (queue_num >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev,
+ "Queue number(%u) is out of range(%u)\n", queue_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring_data = priv->ring_data;
+ for (i = queue_num; i < queue_max; i++) {
+ /* Each cycle needs to determine whether the instance is reset,
+ * to prevent reference to invalid memory. And need to ensure
+ * that the following code is executed within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
+ ring = ring_data[i + h->kinfo.num_tqps].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+
+ ring = ring_data[i].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ value);
+ }
+
+ return 0;
+}
+
+static void hns3_dbg_help(struct hnae3_handle *h)
+{
+ dev_info(&h->pdev->dev, "available commands\n");
+ dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "dump fd tcam\n");
+ dev_info(&h->pdev->dev, "dump tc\n");
+ dev_info(&h->pdev->dev, "dump tm\n");
+ dev_info(&h->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&h->pdev->dev, "dump qos pri map\n");
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+}
+
+static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int uncopy_bytes;
+ char *buf;
+ int len;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count < HNS3_DBG_READ_LEN)
+ return -ENOSPC;
+
+ buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+ uncopy_bytes = copy_to_user(buffer, buf, len);
+
+ kfree(buf);
+
+ if (uncopy_bytes)
+ return -EFAULT;
+
+ return (*ppos = len);
+}
+
+static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *cmd_buf_tmp;
+ int uncopied_bytes;
+ int ret = 0;
+
+ if (*ppos != 0)
+ return 0;
+
+ /* Judge if the instance is being reset. */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+
+ uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+ if (uncopied_bytes) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+
+ if (ret)
+ hns3_dbg_help(handle);
+
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+
+ return count;
+}
+
+static const struct file_operations hns3_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_cmd_read,
+ .write = hns3_dbg_cmd_write,
+};
+
+void hns3_dbg_init(struct hnae3_handle *handle)
+{
+ const char *name = pci_name(handle->pdev);
+ struct dentry *pfile;
+
+ handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+ if (!handle->hnae3_dbgfs)
+ return;
+
+ pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
+ if (!pfile) {
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+ dev_warn(&handle->pdev->dev, "create file for %s fail\n",
+ name);
+ }
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+ if (!hns3_dbgfs_root) {
+ pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
+ return;
+ }
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hns3_dbgfs_root);
+ hns3_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d1c2ce..d1b2de2ecc89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -312,6 +313,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
return min_t(u16, rss_size, max_rss_size);
}
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +353,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret)
@@ -344,6 +367,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
return 0;
out_start_err:
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
@@ -354,11 +380,13 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo;
int i, ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,24 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo->prio_tc[i]);
}
- priv->ae_handle->last_reset_time = jiffies;
return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
- if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
- return;
-
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -408,6 +437,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1312,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- bool if_running = netif_running(netdev);
int ret;
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
- /* if this was called with netdev up then bring netdevice down */
- if (if_running) {
- (void)hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
else
netdev->mtu = new_mtu;
- /* if the netdev was running earlier, bring it up again */
- if (if_running && hns3_nic_net_open(netdev))
- ret = -EINVAL;
-
return ret;
}
@@ -1615,10 +1647,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
priv->tx_timeout_count++;
- if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
- return;
-
- /* request the reset */
+ /* request the reset, and let hclge determine which reset level
+ * should be done
+ */
if (h->ae_algo->ops->reset_event)
h->ae_algo->ops->reset_event(h->pdev, h);
}
@@ -1682,8 +1713,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1819,9 +1852,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
};
static struct pci_driver hns3_driver = {
@@ -1875,7 +1928,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2308,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
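+ /* a non-zero gso_size marks a hardware GRO aggregated packet,
+ * whose checksum has already been verified by hardware
+ */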
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2296,6 +2357,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2329,6 +2393,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
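+ /* pending_buf counts the BDs consumed by the packet being assembled */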
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* if there is a pending BD, next_to_clean has already been advanced
+ * past the head descriptor, so take the frag-end info from the
+ * previous BD
+ */
+ if (pending) {
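+ /* step back one BD, wrapping around the ring if necessary */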
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
@@ -2345,18 +2556,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2577,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2589,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
-
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
-
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (ret > 0) { /* need add frag */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in only after the rest of the packet has been
+ * received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in only after the rest of the packet has been
+ * received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2674,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2497,9 +2691,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2707,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* the packet's FE BD has not arrived yet */
+ goto out;
+ } else if (unlikely(err)) { /* Do jump the err */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
@@ -2669,6 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2677,6 +2880,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2701,9 +2909,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- napi_complete(napi);
- hns3_update_new_int_gl(tqp_vector);
- hns3_mask_vector_irq(tqp_vector, 1);
+ if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
+ napi_complete(napi)) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
return rx_pkt_total;
}
@@ -3319,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3563,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
@@ -3357,11 +3582,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (handle->flags & HNAE3_SUPPORT_VF)
- handle->reset_level = HNAE3_VF_RESET;
- else
- handle->reset_level = HNAE3_FUNC_RESET;
-
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3392,10 +3612,20 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_reg_netdev_fail;
+ }
+
hns3_dcbnl_setup(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
- netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ hns3_dbg_init(handle);
+
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -3418,11 +3648,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_client_stop(handle);
+
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
@@ -3441,8 +3678,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_put_ring_config(priv);
+ hns3_dbg_uninit(handle);
+
priv->ring_data = NULL;
+out_netdev_free:
free_netdev(netdev);
}
@@ -3708,8 +3948,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ /* it is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for a function reset, software intervention is
+ * required to delete the entries
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(ndev);
+ hns3_del_all_fd_rules(ndev, false);
+ }
if (!netif_running(ndev))
return 0;
@@ -3720,6 +3974,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (netif_running(kinfo->netdev)) {
@@ -3729,9 +3984,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
"hns net up fail, ret=%d!\n", ret);
return ret;
}
- handle->last_reset_time = jiffies;
}
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
return ret;
}
@@ -3771,28 +4027,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ return ret;
+
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
- return ret;
+ goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
- if (ret) {
- hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
- }
+ if (ret)
+ goto err_uninit_vector;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4075,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ ret = hns3_nic_dealloc_vector_data(priv);
+ if (ret)
+ netdev_err(netdev, "dealloc vector error\n");
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(netdev);
- hns3_del_all_fd_rules(netdev, false);
- }
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
@@ -3980,15 +4249,23 @@ static int __init hns3_init_module(void)
INIT_LIST_HEAD(&client.node);
+ hns3_dbg_register_debugfs(hns3_driver_name);
+
ret = hnae3_register_client(&client);
if (ret)
- return ret;
+ goto err_reg_client;
ret = pci_register_driver(&hns3_driver);
if (ret)
- hnae3_unregister_client(&client);
+ goto err_reg_driver;
return ret;
+
+err_reg_driver:
+ hnae3_unregister_client(&client);
+err_reg_client:
+ hns3_dbg_unregister_debugfs();
+ return ret;
}
module_init(hns3_init_module);
@@ -4000,6 +4277,7 @@ static void __exit hns3_exit_module(void)
{
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
+ hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d088aa3..4e4b48ba78e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
- HNS3_NIC_STATE_REINITING,
+ HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
-#define HNS3_RING_RX_VM_REG 0x00090
+#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_BD_MULTIPLE 8
-#define HNS3_MAX_MTU 9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_RING_EN_B 0
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
@@ -577,6 +593,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +607,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_FLR_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
@@ -648,4 +683,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
+void hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2b8ba1..4563638367ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
int test_index = 0;
u32 i;
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!\n");
+ return;
+ }
+
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!\n");
+ return;
+ }
+
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_desc_num, new_desc_num;
int ret;
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 580e81743681..fffe8c1c45d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62ed87dc..8af0cef5609b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if a new reset is pending, because a higher level reset
+ * may happen while a lower level reset is being processed.
+ */
+ if (hclge_is_reset_pending(hdev)) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..e1805b972628 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -90,7 +90,6 @@ enum hclge_opcode_type {
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
@@ -126,6 +125,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
@@ -152,6 +152,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -210,6 +211,9 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* SFP command */
+ HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+
/* Error INT commands */
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
@@ -542,20 +546,6 @@ struct hclge_config_mac_speed_dup_cmd {
u8 rsv[22];
};
-#define HCLGE_QUERY_SPEED_S 3
-#define HCLGE_QUERY_AN_B 0
-#define HCLGE_QUERY_DUPLEX_B 2
-
-#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
-#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
-#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
-
-struct hclge_query_an_speed_dup_cmd {
- u8 an_syn_dup_speed;
- u8 pause;
- u8 rsv[23];
-};
-
#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
@@ -572,6 +562,11 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
+struct hclge_sfp_speed_cmd {
+ __le32 sfp_speed;
+ u32 rsv[5];
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -758,6 +753,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e72f724123d7..f6323b2501dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
}
}
- return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+ hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+ return 0;
}
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
@@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
return 0;
}
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+ u8 *prio_tc)
+{
+ int i;
+
+ if (num_tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "tc num checking failed, %u > tc_max(%u)\n",
+ num_tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= num_tc) {
+ dev_err(&hdev->pdev->dev,
+ "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ i, prio_tc[i], num_tc);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ if (num_tc > hdev->vport[i].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+ "allocated tqp(%u) checking failed, %u > tqp(%u)\n",
+ i, num_tc, hdev->vport[i].alloc_tqps);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
+ int ret;
u8 i;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (ets->prio_tc[i] >= hdev->tc_max ||
- i >= hdev->tc_max)
- return -EINVAL;
-
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ }
+ ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
- ret = hclge_tm_schd_info_update(hdev, num_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
return -EINVAL;
- if (tc > hdev->tc_max) {
- dev_err(&hdev->pdev->dev,
- "setup tc failed, tc(%u) > tc_max(%u)\n",
- tc, hdev->tc_max);
- return -EINVAL;
- }
-
- ret = hclge_tm_schd_info_update(hdev, tc);
+ ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
if (ret)
- return ret;
+ return -EINVAL;
- ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, tc);
+ hclge_tm_prio_tc_info_update(hdev, prio_tc);
ret = hclge_tm_init_hw(hdev);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 000000000000..14577bbf3e11
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
+ char *title_buf, char *true_buf,
+ char *false_buf)
+{
+ if (flag)
+ dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+ true_buf);
+ else
+ dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+ false_buf);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ return;
+ }
+
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ for (i = 0; i < HNAE3_MAX_TC; i++)
+ hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
+ "tc", "no sp mode", "sp mode");
+}
+
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+ port_shap_cfg_cmd->port_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+ bp_to_qs_map_cmd->tc_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+ bp_to_qs_map_cmd->qs_group_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+ bp_to_qs_map_cmd->qs_bit_map);
+ return;
+
+err_tm_pg_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+ struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_pg_weight_cmd *pg_weight;
+ struct hclge_qs_weight_cmd *qs_weight;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump tm\n");
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
+ pg_to_pri_map->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+ pg_to_pri_map->pri_bit_map);
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+ qs_to_pri_map->qs_id);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+ qs_to_pri_map->priority);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+ qs_to_pri_map->link_vld);
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ nq_to_qs_map->qset_id);
+
+ cmd = HCLGE_OPC_TM_PG_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+ dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_QS_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ hclge_dbg_dump_tm_pg(hdev);
+
+ return;
+
+err_tm_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+ ret);
+ return;
+ }
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+ pause_param->pause_trans_time);
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+ struct hclge_qos_pri_map_cmd *pri_map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "dump qos pri map fail, status is %d.\n", ret);
+ return;
+ }
+
+ pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pri map\n");
+ dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
+ dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
+ dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
+ dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
+ dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
+ dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
+ dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
+ dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_rx_com_wl *rx_com_wl;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ tx_buf_cmd->tx_pkt_buff[i]);
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ rx_buf_cmd->buf_num[i]);
+
+ dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
+ rx_buf_cmd->shared_buf);
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
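+ /* HCLGE_CMD_FLAG_NEXT chains desc[0] and desc[1] into one command */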
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+
+ return;
+
+err_qos_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+}
+
+static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret, i;
+ u32 *req;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
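+ /* select the TCAM stage, x/y key half and entry index to read back */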
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ req1->index = cpu_to_le32(loc);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ return;
+
+ dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", loc);
+
+ req = (u32 *)req1->tcam_data;
+ for (i = 0; i < 2; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req2->tcam_data;
+ for (i = 0; i < 6; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req3->tcam_data;
+ for (i = 0; i < 5; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+}
+
+static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+{
+ u32 i;
+
+ for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
+ hclge_dbg_fd_tcam_read(hdev, 0, true, i);
+ hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ }
+}
+
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
+ hclge_dbg_fd_tcam(hdev);
+ } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
+ hclge_dbg_dump_tc(hdev);
+ } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
+ hclge_dbg_dump_tm(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
+ hclge_dbg_dump_qos_pause_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
+ hclge_dbg_dump_qos_pri_map(hdev);
+ } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
+ hclge_dbg_dump_qos_buf_cfg(hdev);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 000000000000..50fd0b15fb8e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#pragma pack(1)
+
+struct hclge_qos_pri_map_cmd {
+ u8 pri0_tc : 4,
+ pri1_tc : 4;
+ u8 pri2_tc : 4,
+ pri3_tc : 4;
+ u8 pri4_tc : 4,
+ pri5_tc : 4;
+ u8 pri6_tc : 4,
+ pri7_tc : 4;
+ u8 vlan_pri : 4,
+ rev : 4;
+};
+
+#pragma pack()
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e653f3..6da9e22d82d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -818,7 +818,6 @@ static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
enum hclge_err_int_type int_type)
{
- enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
struct device *dev = &hdev->pdev->dev;
const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
struct hclge_desc desc[2];
@@ -848,23 +847,17 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
}
err_sts = le32_to_cpu(desc[0].data[2]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst1, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
err_sts = le32_to_cpu(desc[0].data[3]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst2, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
}
/* clear PPP INT */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..1c8cf840dff8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,7 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -1144,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1873,37 +1896,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
- u8 *duplex)
-{
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int speed_tmp;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/autoneg/duplex query cmd failed %d\n",
- ret);
- return ret;
- }
-
- *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
-
- ret = hclge_parse_speed(speed_tmp, speed);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "could not parse speed(=%d), %d\n", speed_tmp, ret);
-
- return ret;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -1947,12 +1939,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
static int hclge_mac_init(struct hclge_dev *hdev)
{
- struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- int mtu;
int ret;
+ hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
@@ -1964,15 +1954,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- if (netdev)
- mtu = netdev->mtu;
- else
- mtu = ETH_DATA_LEN;
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
- ret = hclge_set_mtu(handle, mtu);
+ ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
- "set mtu failed ret=%d\n", ret);
+ "allocate buffer fail, ret=%d\n", ret);
return ret;
}
@@ -2061,34 +2052,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
}
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+ struct hclge_sfp_speed_cmd *resp = NULL;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
+ resp = (struct hclge_sfp_speed_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP do not support get SFP speed %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+ return ret;
+ }
+
+ *speed = resp->sfp_speed;
+
+ return 0;
+}
+
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
struct hclge_mac mac = hdev->hw.mac;
- u8 duplex;
int speed;
int ret;
- /* get the speed and duplex as autoneg'result from mac cmd when phy
+ /* get the speed from SFP cmd when phy
* doesn't exist.
*/
- if (mac.phydev || !mac.autoneg)
+ if (mac.phydev)
return 0;
- ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac autoneg/speed/duplex query failed %d\n", ret);
- return ret;
- }
+ /* if the IMP does not support querying the SFP/qSFP speed, return directly */
+ if (!hdev->support_sfp_query)
+ return 0;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/duplex config failed %d\n", ret);
+ ret = hclge_get_sfp_speed(hdev, &speed);
+ if (ret == -EOPNOTSUPP) {
+ hdev->support_sfp_query = false;
+ return ret;
+ } else if (ret) {
return ret;
}
- return 0;
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
+
+ /* must configure full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
@@ -2144,7 +2159,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,18 +2176,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "core reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
@@ -2308,21 +2327,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
int ret;
ret = client->ops->reset_notify(handle, type);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
return ret;
+ }
}
return 0;
}
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret = 0;
+ u16 i;
+
+ if (!client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2389,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+ case HNAE3_FLR_RESET:
+ break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2398,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+ if (hdev->reset_type == HNAE3_FLR_RESET) {
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < HCLGE_RESET_WAIT_CNT)
+ msleep(HCLGE_RESET_WATI_MS);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout: %d\n", cnt);
+ return -EBUSY;
+ }
+
+ return 0;
+ }
+
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2428,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset)
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%d) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2396,11 +2515,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
- hclge_func_reset_cmd(hdev, 0);
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+ case HNAE3_FLR_RESET:
+ dev_info(&pdev->dev, "FLR requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2414,20 +2538,28 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
/* return the highest priority reset level amongst all */
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
- else if (test_bit(HNAE3_CORE_RESET, addr))
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_CORE_RESET, addr)) {
rst_level = HNAE3_CORE_RESET;
- else if (test_bit(HNAE3_IMP_RESET, addr))
- rst_level = HNAE3_IMP_RESET;
- else if (test_bit(HNAE3_FUNC_RESET, addr))
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
-
- /* now, clear all other resets */
- clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_IMP_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
return rst_level;
}
@@ -2457,39 +2589,206 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* There is no mechanism for the PF to know whether the VF has
+ * stopped IO yet, so just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+ /* After performing PF reset, it is not necessary to do the
+ * mailbox handling or send any command to firmware, because
+ * any mailbox handling or command to firmware is only valid
+ * after hclge_cmd_init is called.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ break;
+ case HNAE3_FLR_RESET:
+ /* There is no mechanism for the PF to know whether the VF has
+ * stopped IO yet, so just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ case HNAE3_IMP_RESET:
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+ (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+ BIT(HCLGE_IMP_RESET_BIT))) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because IMP Reset is pending\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->reset_fail_cnt++;
+ if (is_timeout) {
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule to wait for hw reset done\n");
+ return true;
+ }
+
+ dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+ hclge_clear_reset_cause(hdev);
+ mod_timer(&hdev->reset_timer,
+ jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+ return false;
+ }
+
+ hclge_clear_reset_cause(hdev);
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+ return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hnae3_handle *handle;
+ bool is_timeout = false;
+ int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
+ hdev->last_reset_time = jiffies;
/* perform reset of the stack & ae device for a client */
- handle = &hdev->vport[0].nic;
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_reset_prepare_down(hdev);
+ if (ret)
+ goto err_reset;
+
rtnl_lock();
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
- if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclge_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
- hclge_clear_reset_cause(hdev);
- } else {
- rtnl_lock();
- /* schedule again to check pending resets later */
- set_bit(hdev->reset_type, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
+ if (hclge_reset_wait(hdev)) {
+ is_timeout = true;
+ goto err_reset;
}
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- handle->last_reset_time = jiffies;
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ return;
+
+err_reset_lock:
rtnl_unlock();
- ae_dev->reset_type = HNAE3_NONE_RESET;
+err_reset:
+ if (hclge_reset_err_handle(hdev, is_timeout))
+ hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
@@ -2515,20 +2814,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
return;
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
- handle->reset_level = HNAE3_FUNC_RESET;
+ else if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclge_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ hdev->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
- handle->reset_level);
+ hdev->reset_level);
/* request reset & schedule reset task */
- set_bit(handle->reset_level, &hdev->reset_request);
+ set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
- handle->reset_level++;
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ dev_info(&hdev->pdev->dev,
+ "triggering global reset in reset timer\n");
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2863,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* b. else, we can come back later to check this status so re-sched
* now.
*/
+ hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
@@ -2584,6 +2906,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
+ /* start from vport 1, because the PF (vport 0) is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+ /* If the VF is not alive, reset its mps to the default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
@@ -2596,6 +2935,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
+ hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
}
@@ -4336,8 +4676,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hlist_node *node;
int ret;
+ /* Return ok here, because reset error handling will check this
+ * return value. If an error is returned here, the reset process will
+ * fail.
+ */
if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
+ return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +4936,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return 0;
}
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->reset_count;
+}
+
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4805,10 +5174,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
-
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, true);
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
@@ -4828,7 +5193,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -4836,14 +5200,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ /* If it is not a PF reset, the firmware will disable the MAC,
+ * so we only need to stop the PHY here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+ hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
return;
}
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, false);
-
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -4856,6 +5221,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_update_link_status(hdev);
}
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
u16 cmdq_resp, u8 resp_code,
enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6394,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
- int max_frm_size;
- int ret;
-
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
- return -EINVAL;
-
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->max_frm_size = cpu_to_le16(new_mps);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- else
- hdev->mps = max_frm_size;
-
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
struct hclge_dev *hdev = vport->back;
- int ret;
+ int i, max_frm_size, ret = 0;
+
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
- ret = hclge_set_mac_mtu(hdev, new_mtu);
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must be no less than the mps of any of its VFs */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
- return ret;
+ goto out;
}
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
@@ -6250,7 +6663,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7025,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
@@ -6620,6 +7035,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+ struct hclge_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGE_FLR_WAIT_CNT)
+ msleep(HCLGE_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7078,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -6727,6 +7174,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6769,6 +7220,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7231,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, true);
hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -6806,6 +7259,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_start(vport);
+ vport++;
+ }
+}
+
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -6856,6 +7320,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6887,6 +7355,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (hclge_enable_tm_hw_error(hdev, true))
dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+ hclge_reset_vport_state(hdev);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6913,6 +7383,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -7272,9 +7743,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7766,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7804,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
@@ -7336,7 +7820,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
+ .dbg_run_cmd = hclge_dbg_run_cmd,
.process_hw_error = hclge_process_ras_hw_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
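
hclge_get_reset_level() above now walks the pending-reset bits in priority order (IMP, then global, core, function, FLR) and clears only the lower-priority requests that the chosen reset also covers. A stand-alone sketch of that selection logic, using a plain bitmask instead of the kernel's test_bit/clear_bit helpers (the enum values here are illustrative, not the driver's):

#include <stdio.h>

enum reset_type { RST_NONE, RST_FLR, RST_FUNC, RST_CORE, RST_GLOBAL, RST_IMP };

/* pick the highest-priority pending reset and clear what it supersedes */
static enum reset_type get_reset_level(unsigned long *addr)
{
	enum reset_type level = RST_NONE;

	if (*addr & (1UL << RST_IMP)) {
		level = RST_IMP;
		*addr &= ~((1UL << RST_IMP) | (1UL << RST_GLOBAL) |
			   (1UL << RST_CORE) | (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_GLOBAL)) {
		level = RST_GLOBAL;
		*addr &= ~((1UL << RST_GLOBAL) | (1UL << RST_CORE) |
			   (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_CORE)) {
		level = RST_CORE;
		*addr &= ~((1UL << RST_CORE) | (1UL << RST_FUNC));
	} else if (*addr & (1UL << RST_FUNC)) {
		level = RST_FUNC;
		*addr &= ~(1UL << RST_FUNC);
	} else if (*addr & (1UL << RST_FLR)) {
		level = RST_FLR;
		*addr &= ~(1UL << RST_FLR);
	}

	return level;
}

int main(void)
{
	unsigned long pending = (1UL << RST_FUNC) | (1UL << RST_GLOBAL);

	/* picks the global reset and drops the now-redundant function reset */
	printf("level %d, remaining 0x%lx\n", get_reset_level(&pending), pending);
	return 0;
}
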
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d9215404269..0dd23a1d81ea 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -97,11 +97,13 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -115,8 +117,10 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+
#define HCLGE_MAC_DEFAULT_FRAME \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
@@ -151,6 +155,7 @@ enum hclge_evt_cause {
#define HCLGE_MPF_ENBALE 1
enum HCLGE_MAC_SPEED {
+ HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
@@ -593,10 +598,16 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
+ unsigned long reset_count; /* the number of reset has been done */
+ u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -614,6 +625,7 @@ struct hclge_dev {
u8 hw_tc_map;
u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
+ u8 support_sfp_query;
#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
@@ -644,6 +656,7 @@ struct hclge_dev {
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
+ struct timer_list reset_timer;
struct work_struct service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -667,6 +680,8 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
+ /* vport_lock protect resource shared by vports */
+ struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -717,6 +732,11 @@ struct hclge_rss_tuple_cfg {
u8 ipv6_fragment_en;
};
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAX
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -742,6 +762,10 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +792,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -777,9 +807,14 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
#endif
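
The HCLGE_MAC_DEFAULT_FRAME definition above now reserves room for two VLAN tags, so the 1500-byte default MTU corresponds to a 1526-byte frame (1500 + 14-byte header + 4-byte FCS + two 4-byte tags), and hclge_set_vport_mtu() range-checks the result against the 64..9728 byte limits. A small sketch of that arithmetic; the constants mirror the patch, while the helper name is made up:

#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define ETH_DATA_LEN	1500

#define MAC_MIN_FRAME		64
#define MAC_MAX_FRAME		9728
#define MAC_DEFAULT_FRAME	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)

/* hypothetical helper mirroring the MTU -> max frame size conversion */
static int mtu_to_frame_size(int mtu)
{
	int frame = mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	if (frame < MAC_MIN_FRAME || frame > MAC_MAX_FRAME)
		return -1;			/* -EINVAL in the driver */

	/* never program a frame size below the 1526-byte default */
	return frame > MAC_DEFAULT_FRAME ? frame : MAC_DEFAULT_FRAME;
}

int main(void)
{
	printf("mtu 1500 -> frame %d\n", mtu_to_frame_size(1500));	/* 1526 */
	printf("mtu 9000 -> frame %d\n", mtu_to_frame_size(9000));	/* 9026 */
	return 0;
}
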
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022938d9..e16a730a5f54 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+ enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ return -EINVAL;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
return status;
}
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ bool alive = !!mbx_req->msg[2];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -363,24 +389,28 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
- mbx_req->mbx_src_vfid);
-
- /* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
- ret = hclge_inform_reset_assert_to_vf(vport);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
- ret, vport->vport_id);
- return;
- }
+ vport->vport_id);
- dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
- mbx_req->mbx_src_vfid);
- /* reset this virtual function */
- hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+ hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ int ret;
+ u32 mtu;
+
+ memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+ ret = hclge_set_vport_mtu(vport, mtu);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +490,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
case HCLGE_MBX_GET_QINFO:
ret = hclge_get_vf_queue_info(vport, req, true);
if (ret)
@@ -487,6 +524,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_RESET:
hclge_reset_vf(vport, req);
break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport, req);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03018638f701..741cb3b9519d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
int ret;
if (!phydev)
return 0;
- phydev->supported &= ~SUPPORTED_FIBRE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
@@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
phy_support_asym_pause(phydev);
return 0;
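
hclge_mac_connect_phy() above now builds a link-mode mask (autoneg, twisted pair, and the 10/100 and gigabit mode arrays) and ANDs it into phydev->supported with linkmode_and(), after clearing the fibre bit. A plain-bitmask sketch of that intersection; the enum here is illustrative and does not follow the real ETHTOOL_LINK_MODE_* numbering:

#include <stdio.h>
#include <stdint.h>

/* bit positions standing in for ETHTOOL_LINK_MODE_* (values are made up) */
enum { MODE_10_HALF, MODE_10_FULL, MODE_100_HALF, MODE_100_FULL,
       MODE_1000_FULL, MODE_FIBRE, MODE_AUTONEG, MODE_TP };

int main(void)
{
	/* what the PHY reports it can do */
	uint64_t supported = (1ULL << MODE_100_FULL) | (1ULL << MODE_1000_FULL) |
			     (1ULL << MODE_FIBRE) | (1ULL << MODE_AUTONEG);

	/* what the MAC is willing to use: copper 10/100/1000 plus autoneg */
	uint64_t mask = (1ULL << MODE_10_FULL) | (1ULL << MODE_100_FULL) |
			(1ULL << MODE_1000_FULL) | (1ULL << MODE_AUTONEG) |
			(1ULL << MODE_TP);

	supported &= ~(1ULL << MODE_FIBRE);	/* drop fibre, as in the patch */
	supported &= mask;			/* keep only modes both sides allow */

	printf("negotiable modes: 0x%llx\n", (unsigned long long)supported);
	return 0;
}
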
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 494e562fe8c7..00458da67503 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
struct hclge_vport *vport = hdev->vport;
struct hnae3_knic_private_info *kinfo;
u32 i, k;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- if (prio_tc[i] >= hdev->tm_info.num_tc)
- return -EINVAL;
hdev->tm_info.prio_tc[i] = prio_tc[i];
for (k = 0; k < hdev->num_alloc_vport; k++) {
@@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
kinfo->prio_tc[i] = prio_tc[i];
}
}
- return 0;
}
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- if (num_tc > hdev->vport[i].alloc_tqps)
- return -EINVAL;
- }
-
hdev->tm_info.num_tc = num_tc;
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hdev->hw_tc_map = bit_map;
hclge_tm_schd_info_init(hdev);
-
- return 0;
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 25eef13a3e14..9c6192c46aa6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -55,6 +55,12 @@ struct hclge_qs_weight_cmd {
u8 dwrr;
};
+struct hclge_ets_tc_weight_cmd {
+ u8 tc_weight[HNAE3_MAX_TC];
+ u8 weight_offset;
+ u8 rsvd[15];
+};
+
#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH 0
#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
@@ -131,8 +137,8 @@ struct hclge_port_shapping_cmd {
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445f6799..d5765c8cf3a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
return false;
}
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
}
}
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
- struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
struct hclgevf_hw *hw = &hdev->hw;
- int ring_type = ring->flag;
- u32 reg_val;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- spin_lock_init(&ring->lock);
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
ring->dev = hdev;
+ ring->flag = ring_type;
/* allocate CSQ/CRQ descriptor */
ret = hclgevf_alloc_cmd_desc(ring);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
- return ret;
- }
- /* initialize the hardware registers with csq/crq dma-address,
- * descriptor number, head & tail pointers
- */
- switch (ring_type) {
- case HCLGEVF_TYPE_CSQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- return 0;
- case HCLGEVF_TYPE_CRQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- return 0;
- default:
- return -EINVAL;
- }
+ return ret;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
return status;
}
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
- /* setup Tx write back timeout */
+ /* Set up the locks for the command queues */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- /* setup queue CSQ/CRQ rings */
- hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CSQ ring\n", ret);
+ "CSQ ring setup error %d\n", ret);
return ret;
}
- hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CRQ ring\n", ret);
+ "CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher-level
+ * reset may happen while a lower-level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- goto err_crq;
+ return ret;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_crq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
- return ret;
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0c8b62..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb945389..efec1b7a6a64 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
struct hclgevf_tqp *tqp;
int i;
- /* if this is on going reset then we need to re-allocate the TPQs
- * since we cannot assume we would get same number of TPQs back from PF
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
@@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- /* if this is on going reset then we need to re-allocate the hnae queues
- * as well since number of TPQs from PF might have changed.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
@@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -956,13 +947,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
return status;
}
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
- struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
- return tqp->index;
-}
-
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1081,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0);
}
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+ sizeof(new_mtu), true, NULL, 0);
+}
+
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
+ int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- return client->ops->reset_notify(handle, type);
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+ unsigned long delay_us,
+ unsigned long wait_cnt)
+{
+ unsigned long cnt = 0;
+
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < wait_cnt)
+ usleep_range(delay_us, delay_us * 2);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_RESET_WAIT_MS 500
-#define HCLGEVF_RESET_WAIT_CNT 20
- u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
/* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
- msleep(HCLGEVF_RESET_WAIT_MS);
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- cnt++;
- }
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ return hclgevf_flr_poll_timeout(hdev,
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_CNT);
+
+ ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
- dev_warn(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
- return -EBUSY;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could'nt get reset done status from h/w, timeout!\n");
+ return ret;
}
/* we will wait a bit more to let reset of the stack to complete. This
@@ -1145,10 +1178,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
/* re-initialize the hclge device */
- ret = hclgevf_init_hdev(hdev);
+ ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1191,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
return 0;
}
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ default:
+ break;
+ }
+
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+ hdev->reset_type, ret);
+
+ return ret;
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
+ /* Initialize ae_dev reset status as well, in case enet layer wants to
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ ret = hclgevf_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,58 +1254,118 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
-
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
- rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
- rtnl_unlock();
- return ret;
+ goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ goto err_reset_lock;
+ }
/* bring up the nic to enable TX/RX again */
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
return ret;
-}
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ /* When a VF reset fails, only a higher-level reset asserted by the PF
+ * can recover it, so re-initialize the command queue so that this
+ * higher-level reset event can still be received.
+ */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
-{
- int status;
- u8 respmsg;
+ return ret;
+}
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF reset request to PF failed(=%d)\n", status);
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
- return status;
+ return rst_level;
}
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
- handle->reset_level = HNAE3_VF_RESET;
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- handle->last_reset_time = jiffies;
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS 100
+#define HCLGEVF_FLR_WAIT_CNT 50
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclgevf_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGEVF_FLR_WAIT_CNT)
+ msleep(HCLGEVF_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1454,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+ hdev->last_reset_time = jiffies;
+ while ((hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+ != HNAE3_NONE_RESET) {
+ ret = hclgevf_reset(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF stack reset failed %d.\n", ret);
+ }
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1352,19 +1491,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- /* request PF for resetting this VF via mailbox */
- ret = hclgevf_do_reset(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "VF rst fail, stack will call\n");
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
+ hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1523,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+ schedule_work(&hdev->keep_alive_task);
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+ u8 respmsg;
+ int ret;
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+ 0, false, &respmsg, sizeof(u8));
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
static void hclgevf_service_task(struct work_struct *work)
{
struct hclgevf_dev *hdev;
@@ -1407,24 +1566,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
{
- u32 cmdq_src_reg;
+ u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
- return true;
+ return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
- return false;
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1606,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+ enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
- if (!hclgevf_check_event_cause(hdev, &clearval))
- goto skip_sched;
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
- hclgevf_mbx_handler(hdev);
-
- hclgevf_clear_event_cause(hdev, clearval);
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ hclgevf_reset_task_schedule(hdev);
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
-skip_sched:
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+ hclgevf_clear_event_cause(hdev, clearval);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -1504,6 +1685,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1566,21 +1770,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* ring enable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
- }
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -1595,24 +1785,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* Ring disable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
- }
-
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
@@ -1621,12 +1797,40 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_update_link_status(hdev, 0);
}
-static void hclgevf_state_init(struct hclgevf_dev *hdev)
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = alive ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+ 0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+
+ del_timer_sync(&hdev->keep_alive_timer);
+ cancel_work_sync(&hdev->keep_alive_task);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1872,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1910,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
@@ -1721,6 +1922,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
@@ -1728,10 +1931,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2060,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
- /* check if we need to skip initialization of pci. This will happen if
- * device is undergoing VF reset. Otherwise, we would need to
- * re-initialize pci interface again i.e. when device is not going
- * through *any* reset or actually undergoing full reset.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2148,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
- /* check if device is on-going full reset(i.e. pcie as well) */
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
- dev_warn(&pdev->dev, "device is going full reset\n");
- hclgevf_uninit_hdev(hdev);
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
}
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
return ret;
}
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ goto err_cmd_queue_init;
+ }
+
ret = hclgevf_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -1983,16 +2249,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query vf status error, ret = %d.\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
@@ -2001,6 +2268,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2288,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2034,6 +2307,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hdev->last_reset_time = jiffies;
pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
return 0;
@@ -2043,25 +2317,31 @@ err_config:
err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_query_vf:
- hclgevf_cmd_uninit(hdev);
err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ }
+
hclgevf_cmd_uninit(hdev);
- hclgevf_uninit_msi(hdev);
- hclgevf_pci_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2351,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclgevf_init_hdev(ae_dev->priv);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
- return ret;
+ hdev = ae_dev->priv;
+ timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+ INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+ return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2437,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2159,13 +2452,38 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->reset_count;
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
.get_vector = hclgevf_get_vector,
@@ -2193,11 +2511,17 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
.get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
};
static struct hnae3_ae_algo ae_algovf = {
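The hclgevf_reset_wait() hunk above replaces the open-coded msleep() polling loop with readl_poll_timeout(). A minimal sketch of that idiom, assuming only the HCLGEVF_RST_ING/HCLGEVF_RST_ING_BITS definitions added in the hclgevf_main.h hunk further below and a hypothetical io_base argument:

#include <linux/iopoll.h>

/* Poll the reset-in-progress register until all reset bits clear,
 * sleeping between reads; returns 0 on success or -ETIMEDOUT.
 */
static int wait_for_vf_reset_done(void __iomem *io_base)
{
	u32 val;

	return readl_poll_timeout(io_base + HCLGEVF_RST_ING, val,
				  !(val & HCLGEVF_RST_ING_BITS),
				  20000,		/* sleep between reads, in us */
				  20000 * 2000);	/* total timeout, in us */
}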
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e8ffab..4517b7ea5817 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -31,11 +31,19 @@
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +62,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +161,17 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
+ unsigned long reset_count; /* the number of resets that have been done */
u32 reset_attempts;
u32 fw_version;
@@ -178,7 +201,9 @@ struct hclgevf_dev {
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
struct timer_list service_timer;
+ struct timer_list keep_alive_timer;
struct work_struct service_task;
+ struct work_struct keep_alive_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -192,18 +217,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+ return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f96304..ef9c8e6eca28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
udelay(HCLGEVF_SLEEP_USCOEND);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- hdev->nic.reset_level = HNAE3_VF_RESET;
+ reset_type = le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 097b5502603f..d1a7d2522d82 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -50,6 +50,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_RX_CSUM = 26,
+
HINIC_PORT_CMD_SET_PORT_STATE = 41,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index f92f1bf3901a..1dfa7eb05c10 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -74,12 +74,6 @@
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
@@ -93,6 +87,17 @@
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+ << (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) >> ((wq)->wqebbs_per_page_shift))
+ & ((wq)->num_q_pages - 1));
+}
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int err;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+ wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int i, j, err = -ENOMEM;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
+ num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+ wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
**/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
atomic_add(num_wqebbs, &wq->cons_idx);
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *cons_idx)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx;
int curr_pg, end_pg;
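The hinic_wq_allocate()/hinic_wqs_cmdq_alloc() changes above trade per-WQE divisions for shifts, which is safe only because the WQEBB size and the per-page WQEBB count are now validated with is_power_of_2() and their shift counts cached via ilog2(). A small sketch of the same conversion, assuming the caller has already rejected non-power-of-two sizes:

#include <linux/kernel.h>
#include <linux/log2.h>

/* Round wqe_size up to whole WQEBBs and count them with a shift instead
 * of a divide; equivalent to DIV_ROUND_UP(wqe_size, wqebb_size) when
 * wqebb_size is a power of two.
 */
static u16 wqebbs_needed(u32 wqe_size, u32 wqebb_size)
{
	u16 shift = ilog2(wqebb_size);	/* assumes is_power_of_2(wqebb_size) */

	return ALIGN(wqe_size, wqebb_size) >> shift;
}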
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9b66545ba563..0a936cd6709b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
-
+ u16 wqebbs_per_page_shift;
+ u16 wqebb_size_shift;
/* The addresses are 64 bit in the HW */
u64 block_paddr;
void **shadow_block_vaddr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 9754d6ed5f4a..138941527872 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -170,6 +170,10 @@
#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+
#define HINIC_RQ_CQE_STATUS_GET(val, member) \
(((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
HINIC_RQ_CQE_STATUS_##member##_MASK)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index fdf2bdb6b0d0..6d48dc62a44b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -600,9 +600,6 @@ static int add_mac_addr(struct net_device *netdev, const u8 *addr)
u16 vid = 0;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
@@ -726,6 +723,7 @@ static void set_rx_mode(struct work_struct *work)
{
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+ struct netdev_hw_addr *ha;
netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
@@ -733,6 +731,9 @@ static void set_rx_mode(struct work_struct *work)
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+
+ netdev_for_each_mc_addr(ha, nic_dev->netdev)
+ add_mac_addr(nic_dev->netdev, ha->addr);
}
static void hinic_set_rx_mode(struct net_device *netdev)
@@ -806,7 +807,8 @@ static const struct net_device_ops hinic_netdev_ops = {
static void netdev_features_init(struct net_device *netdev)
{
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
netdev->vlan_features = netdev->hw_features;
@@ -869,12 +871,16 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t features, bool force_change)
{
netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
int err = 0;
if (changed & NETIF_F_TSO)
err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+ if (changed & NETIF_F_RXCSUM)
+ err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 7575a7d3bd9f..122c93597268 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -409,3 +409,33 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
return 0;
}
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx csum offload, ret = %d\n",
+ rx_csum_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index f6e3220fe28f..02d896eed455 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -183,6 +183,15 @@ struct hinic_tso_config {
u8 resv2[3];
};
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -213,4 +222,5 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 4c0f7eda1166..f86f2e693224 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -89,6 +89,28 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
hinic_rxq_clean_stats(rxq);
}
+static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_rq_cqe *cqe;
+ struct hinic_rq *rq;
+ u32 csum_err;
+ u32 status;
+
+ rq = rxq->rq;
+ cqe = rq->cqe[cons_idx];
+ status = be32_to_cpu(cqe->status);
+ csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (!csum_err)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+}
/**
* rx_alloc_skb - allocate skb and map it to dma address
* @rxq: rx queue
@@ -207,9 +229,9 @@ skb_out:
wmb(); /* write all the wqes before update PI */
hinic_rq_update(rxq->rq, prod_idx);
+ tasklet_schedule(&rxq->rx_task);
}
- tasklet_schedule(&rxq->rx_task);
return i;
}
@@ -328,6 +350,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ rx_csum(rxq, ci, skb);
+
prefetch(skb->data);
pkt_len = sge.len;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 27c9af4b1c12..ab3fabab91b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -23,6 +23,10 @@
#include "hinic_hw_qp.h"
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+
struct hinic_rxq_stats {
u64 pkts;
u64 bytes;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 760b2ad8e295..209255495bc9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev)
dev->phy.duplex = phy->duplex;
dev->phy.pause = phy->pause;
dev->phy.asym_pause = phy->asym_pause;
- dev->phy.advertising = phy->advertising;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
+ phy->advertising);
}
static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
@@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
phy_dev->autoneg = phy->autoneg;
phy_dev->speed = phy->speed;
phy_dev->duplex = phy->duplex;
- phy_dev->advertising = phy->advertising;
+ ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
+ phy->advertising);
return phy_start_aneg(phy_dev);
}
@@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev,
dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
dev->phy.def->name = dev->phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
- dev->phy.features = dev->phy_dev->supported;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
+ dev->phy_dev->supported);
dev->phy.address = dev->phy_dev->mdio.addr;
dev->phy.mode = dev->phy_dev->interface;
return 0;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 7c4b55482f72..5e5c57db0d3f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget)
e100_rx_clean(nic, &work_done, budget);
e100_tx_clean(nic);
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ /* If budget fully consumed, continue polling */
+ if (work_done == budget)
+ return budget;
+
+ /* only re-enable interrupt if stack agrees polling is really done */
+ if (likely(napi_complete_done(napi, work_done)))
e100_enable_irq(nic);
- }
return work_done;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 43b6d3cec3b3..8fe9af0e2ab7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
- if (!tx_clean_complete)
- work_done = budget;
+ if (!tx_clean_complete || work_done == budget)
+ return budget;
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc72c520..be13227f1697 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
void e1000e_ptp_remove(struct e1000_adapter *adapter);
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts);
+
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 16a73bd9f4cb..308c006cb41d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2651,9 +2651,9 @@ err:
/**
* e1000e_poll - NAPI Rx polling callback
* @napi: struct associated with this polling callback
- * @weight: number of packets driver is allowed to process this poll
+ * @budget: number of packets driver is allowed to process this poll
**/
-static int e1000e_poll(struct napi_struct *napi, int weight)
+static int e1000e_poll(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
napi);
@@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
- adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+ adapter->clean_rx(adapter->rx_ring, &work_done, budget);
- if (!tx_cleaned)
- work_done = weight;
+ if (!tx_cleaned || work_done == budget)
+ return budget;
- /* If weight not fully consumed, exit the polling mode */
- if (work_done < weight) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
@@ -4319,13 +4320,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
- * @systim: time value read, sanitized and returned
+ * @systim: PHC time value read, sanitized and returned
+ * @sts: structure to hold system time before and after reading SYSTIML,
+ * may be NULL
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
-static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
+ struct ptp_system_timestamp *sts)
{
u64 time_delta, rem, temp;
u64 systim_next;
@@ -4335,7 +4339,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
+ ptp_read_system_prets(sts);
systim_next = (u64)er32(SYSTIML);
+ ptp_read_system_postts(sts);
systim_next |= (u64)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
@@ -4353,15 +4359,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
}
/**
- * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
- * @cc: cyclecounter structure
+ * e1000e_read_systim - read SYSTIM register
+ * @adapter: board private structure
+ * @sts: structure which will contain system time before and after reading
+ * SYSTIML, may be NULL
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts)
{
- struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
- cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel, systimeh;
+ u32 systimel, systimel_2, systimeh;
u64 systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4369,11 +4376,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
+ ptp_read_system_prets(sts);
systimel = er32(SYSTIML);
+ ptp_read_system_postts(sts);
systimeh = er32(SYSTIMH);
/* Is systimel is so large that overflow is possible? */
if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
- u32 systimel_2 = er32(SYSTIML);
+ ptp_read_system_prets(sts);
+ systimel_2 = er32(SYSTIML);
+ ptp_read_system_postts(sts);
if (systimel > systimel_2) {
/* There was an overflow, read again SYSTIMH, and use
* systimel_2
@@ -4386,12 +4397,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim |= (u64)systimeh << 32;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
- systim = e1000e_sanitize_systim(hw, systim);
+ systim = e1000e_sanitize_systim(hw, systim, sts);
return systim;
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+
+ return e1000e_read_systim(adapter, NULL);
+}
+
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c76945ad9b..1a4c65d9feb4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
#endif/*CONFIG_E1000E_HWTS*/
/**
- * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * e1000e_phc_gettimex - Reads the current time from the hardware clock and
+ * system clock
* @ptp: ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec structure to hold the current PHC time
+ * @sts: structure to hold the current system time
*
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int e1000e_phc_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* NOTE: Non-monotonic SYSTIM readings may be returned */
+ cycles = e1000e_read_systim(adapter, sts);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime64 = e1000e_phc_gettime,
+ .gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
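The netdev.c and ptp.c hunks above convert the PHC read path to the gettimex64 interface: e1000e_read_systim() brackets the SYSTIML read with ptp_read_system_prets()/ptp_read_system_postts() so callers get system timestamps taken immediately around the device read. A hedged sketch of the general callback shape, with my_adapter and my_read_phc_cycles() as hypothetical stand-ins for the driver's own structures and register access:

#include <linux/ptp_clock_kernel.h>
#include <linux/spinlock.h>
#include <linux/timecounter.h>

struct my_adapter {
	struct ptp_clock_info ptp_clock_info;
	struct timecounter tc;
	spinlock_t systim_lock;
};

/* Hypothetical raw device-clock read standing in for e1000e_read_systim();
 * it is expected to wrap the register access with ptp_read_system_prets()
 * and ptp_read_system_postts() so sts holds system time taken immediately
 * around the device read.
 */
static u64 my_read_phc_cycles(struct my_adapter *adapter,
			      struct ptp_system_timestamp *sts);

static int my_phc_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	struct my_adapter *adapter = container_of(ptp, struct my_adapter,
						  ptp_clock_info);
	unsigned long flags;
	u64 cycles, ns;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	cycles = my_read_phc_cycles(adapter, sts);
	ns = timecounter_cyc2time(&adapter->tc, cycles);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	*ts = ns_to_timespec64(ns);
	return 0;
}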
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5b2a50e5798f..6fd15a734324 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
-
- /* re-enable the q_vector */
- fm10k_qv_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ fm10k_qv_enable(q_vector);
return min(work_done, budget - 1);
}
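The e100, e1000, e1000e and fm10k poll hunks above all converge on the same NAPI completion rule: return the full budget while work remains, and re-arm the device interrupt only when napi_complete_done() confirms polling is really finished (it returns false while busy-polling is in progress). A minimal sketch of that pattern, with my_clean_rx()/my_enable_irq() as hypothetical per-driver helpers:

#include <linux/netdevice.h>

struct my_adapter {
	struct napi_struct napi;
	/* ...device state... */
};

/* Hypothetical per-driver helpers standing in for the real clean/irq paths. */
static int my_clean_rx(struct my_adapter *adapter, int budget);
static void my_enable_irq(struct my_adapter *adapter);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	int work_done = my_clean_rx(adapter, budget);

	/* More work pending: stay in polling mode and keep the IRQ masked. */
	if (work_done == budget)
		return budget;

	/* Re-enable the interrupt only if the stack agrees polling is done;
	 * napi_complete_done() returns false while busy-polling is active.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		my_enable_irq(adapter);

	return work_done;
}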
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 876cac317e79..8de9085bba9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
__I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
@@ -146,6 +147,7 @@ enum i40e_state_t {
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
+ __I40E_VIRTCHNL_OP_PENDING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -494,7 +496,6 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
-#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 501ee718177f..7ab61f6ebb5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec6134e..11506102471c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,7 +11,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0006
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 85f75b5978fc..97a9b1fb4763 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
i40e_status status;
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9f8464f80783..a6bc7847346b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ks->base.speed = SPEED_100;
break;
default:
+ ks->base.speed = SPEED_UNKNOWN;
break;
}
ks->base.duplex = DUPLEX_FULL;
@@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
i40e_status status;
u8 aq_failures;
int err = 0;
+ u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
* port's controlling PF
@@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
- if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
/* Send message that it might not necessarily work*/
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
@@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__I40E_DOWN, pf->state))
@@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) &&
+	    (wol->wolopts != WAKE_FILTER))
return -EOPNOTSUPP;
/* is this a new value? */
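
A more conventional way to express "reject anything other than magic packet or wake filters" is the mask form sketched below; note it additionally accepts the two options combined, unlike the exact-match test above, and the helper name is illustrative only.

/* Illustrative mask-based form of the wolopts check; not part of the patch. */
static bool i40e_wol_opts_unsupported(u32 wolopts)
{
	return wolopts & ~(u32)(WAKE_MAGIC | WAKE_FILTER);
}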
@@ -4659,14 +4661,15 @@ flags_complete:
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
- * be set, however we do not support _changing_ the flag if NPAR is
- * enabled or FW API version < 1.7. There are situations where older
- * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
- * not allow the user to enable/disable LLDP with this flag on
- * unsupported FW versions.
+ * be set, however we do not support _changing_ the flag:
+ * - on XL710 if NPAR is enabled or FW API version < 1.7
+ * - on X722 with FW API version < 1.6
+ * There are situations where older FW versions/NPAR enabled PFs could
+ * disable LLDP, however we _must_ not allow the user to enable/disable
+ * LLDP with this flag on unsupported FW versions.
*/
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
dev_warn(&pf->pdev->dev,
"Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a3f45335437c..6d5b13f69dec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_MINOR 7
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
@@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
bool found = false;
int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
- "Missing mac_filter_hash_lock\n");
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
@@ -9632,6 +9635,7 @@ end_core_reset:
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
/**
@@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
- /* Stopping the FW LLDP engine is only supported on the
- * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
- * engine is not supported if NPAR is functioning on this
- * part
+ /* Stopping FW LLDP engine is supported on XL710 and X722
+ * starting from FW versions determined in i40e_init_adminq.
+ * Stopping the FW LLDP engine is not supported on XL710
+	 * if NPAR is functioning, so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- !pf->hw.func_caps.npar_enable &&
- (pf->hw.aq.api_maj_ver > 1 ||
- (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
- pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
+ pf->hw.func_caps.npar_enable &&
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
@@ -14302,23 +14305,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
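
A short note on the strncpy() to strlcpy() conversions in this hunk and the PTP hunk below: strlcpy() always NUL-terminates the destination and returns the length of the source, so truncation is detectable, whereas strncpy() leaves the buffer unterminated when the source fills it. A minimal fragment, assuming the PCI_SPEED_SIZE buffer defined earlier in this file:

	char speed[PCI_SPEED_SIZE];
	size_t len;

	/* strncpy(speed, src, sizeof(speed)) leaves 'speed' unterminated when
	 * strlen(src) >= sizeof(speed); strlcpy() always writes the '\0' and
	 * returns strlen(src), so len >= sizeof(speed) signals truncation.
	 */
	len = strlcpy(speed, "8.0", sizeof(speed));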
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 1199f0502d6d..e6fc0aff8c99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -694,7 +694,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name,
+ strlcpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index aef3c89ee79c..a0b1575468fc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2667,10 +2667,11 @@ tx_only:
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- i40e_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ i40e_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
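
For context on the busy-polling change above, this is a hedged skeleton of the pattern the hunk adopts; example_clean_rings() and example_enable_irq() are hypothetical stand-ins for the driver's clean-up and interrupt re-enable paths.

/* Illustrative poll skeleton; not part of the patch. */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rings(napi, budget);	/* hypothetical */

	if (work_done >= budget)
		return budget;		/* stay in polling mode */

	/* napi_complete_done() returns false when the stack keeps the queue
	 * in busy-poll mode; only re-arm the interrupt when it returns true.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		example_enable_irq(napi);			/* hypothetical */

	return min(work_done, budget - 1);
}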
@@ -3473,6 +3474,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -3526,6 +3529,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
u16 i = xdp_ring->next_to_use;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
+ void *data = xdpf->data;
u32 size = xdpf->len;
dma_addr_t dma;
@@ -3533,8 +3537,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
-
- dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdp_ring->dev, dma))
return I40E_XDP_CONSUMED;
@@ -3652,8 +3655,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7df969c59855..2781ab91ca82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -615,6 +615,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index ac5698ed0b11..2ac23ebfbf31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (allmulti || alluni)) {
dev_err(&pf->pdev->dev,
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
@@ -1675,13 +1676,20 @@ err_out:
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
- return i40e_pci_sriov_enable(pdev, num_vfs);
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
}
if (!pci_vfs_assigned(pf->pdev)) {
@@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto sriov_configure_out;
}
- return 0;
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
}
/***********************virtual channel routines******************/
@@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -3941,6 +3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
ret = 0;
error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf->tx_rate = max_tx_rate;
error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ret = 0;
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
int abs_vf_id;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
ret = -EIO;
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vf = &pf->vf[vf_id];
@@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
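
The same guard appears in every VF ndo callback touched above; reduced to its essentials, the pattern looks like the sketch below, where example_vf_op() is an illustrative name.

/* Illustrative sketch of the single-operation guard; not part of the patch. */
static int example_vf_op(struct i40e_pf *pf)
{
	int ret = 0;

	/* Atomically claim the slot; a concurrent VF operation sees the bit
	 * already set and backs off with -EAGAIN instead of racing.
	 */
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state))
		return -EAGAIN;

	/* ... perform the VF configuration work, setting ret on failure ... */

	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}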
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bf67d62e2b5f..f9621026beef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -13,9 +13,9 @@
#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
-#define I40E_PRIORITY_MASK 0x7000
+#define I40E_PRIORITY_MASK 0xE000
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb9bfad96daf..9b4d7cec2e18 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1761,10 +1761,11 @@ tx_only:
if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- iavf_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ iavf_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -2343,6 +2344,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -2461,8 +2464,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tso < 0)
goto out_drop;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= IAVF_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b8548370f1c7..a385575600f6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
@@ -97,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
struct ice_tc_info {
u16 qoffset;
- u16 qcount;
+ u16 qcount_tx;
+ u16 qcount_rx;
+ u8 netdev_tc;
};
struct ice_tc_cfg {
@@ -149,10 +150,10 @@ enum ice_state {
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
- * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
- * be checked. If you need to add a bit into consideration for nominal
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
- * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
@@ -182,8 +183,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* rx ring array */
- struct ice_ring **tx_rings; /* tx ring array */
+ struct ice_ring **rx_rings; /* Rx ring array */
+ struct ice_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -200,8 +201,8 @@ struct ice_vsi {
int sw_base_vector; /* Irq base for OS reserved vectors */
int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
- u16 vsi_num; /* HW (absolute) index of this VSI */
- u16 idx; /* software index in pf->vsi[] */
+ u16 vsi_num; /* HW (absolute) index of this VSI */
+ u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
@@ -254,8 +255,8 @@ struct ice_q_vector {
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of tx rings in vector */
- u8 num_ring_rx; /* total number of rx rings in vector */
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 num_ring_rx; /* total number of Rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
@@ -307,10 +308,10 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan tx queues setup */
- u16 num_lan_rx; /* num lan rx queues setup */
- u16 q_left_tx; /* remaining num tx queues left unclaimed */
- u16 q_left_rx; /* remaining num rx queues left unclaimed */
+ u16 num_lan_tx; /* num lan Tx queues setup */
+ u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 q_left_tx; /* remaining num Tx queues left unclaimed */
+ u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555f55dd..fcdcd80b18e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -5,7 +5,7 @@
#define _ICE_ADMINQ_CMD_H_
/* This header file defines the Admin Queue commands, error codes and
- * descriptor format. It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
*/
#define ICE_MAX_VSI 768
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
@@ -462,7 +463,7 @@ struct ice_aqc_sw_rules {
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions. "index"
+ * This structure describes the lookup rules and associated actions.  "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
-#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
+struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
@@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
* result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
* 32-bit words.
*/
struct ice_aq_desc {
@@ -1379,10 +1380,10 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* success */
+ ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* object already exists */
+ ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd707a6d6..4c1d35da940d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- ice_init_def_sw_recp(hw);
-
- return 0;
+ return ice_init_def_sw_recp(hw);
}
/**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
- /* Query the allocated resources for tx scheduler */
+ /* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
*/
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* ice_write_rxq_ctx
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
@@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSI per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return ICE_MAX_VSI / funcs;
+}
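
A quick worked example of the calculation above: with ICE_MAX_VSI at 768 and four functions set in the valid_functions bitmap, each PF is guaranteed 768 / 4 = 192 VSIs. The stand-alone snippet below reproduces the arithmetic, using the compiler builtin in place of hweight8(); values are illustrative.

#include <stdio.h>

#define ICE_MAX_VSI		768
#define ICE_CAPS_VALID_FUNCS_M	0xFF

int main(void)
{
	unsigned int valid_functions = 0x0F;	/* functions 0-3 present */
	unsigned int funcs = __builtin_popcount(valid_functions &
						ICE_CAPS_VALID_FUNCS_M);

	printf("guaranteed VSIs per PF: %u\n",
	       funcs ? ICE_MAX_VSI / funcs : 0);
	/* prints "guaranteed VSIs per PF: 192" */
	return 0;
}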
+
+/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
* @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Valid Functions = %d\n",
+ caps->valid_functions);
+ break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guaranteed_num_vsi = number;
+ func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
- func_p->guaranteed_num_vsi);
+ number);
}
break;
case ICE_AQC_CAPS_RSS:
@@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 84c967294eaf..2bf5e11f559a 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -3,6 +3,26 @@
#include "ice_common.h"
+#define ICE_CQ_INIT_REGS(qinfo, prefix) \
+do { \
+ (qinfo)->sq.head = prefix##_ATQH; \
+ (qinfo)->sq.tail = prefix##_ATQT; \
+ (qinfo)->sq.len = prefix##_ATQLEN; \
+ (qinfo)->sq.bah = prefix##_ATQBAH; \
+ (qinfo)->sq.bal = prefix##_ATQBAL; \
+ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
+ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
+ (qinfo)->rq.head = prefix##_ARQH; \
+ (qinfo)->rq.tail = prefix##_ARQT; \
+ (qinfo)->rq.len = prefix##_ARQLEN; \
+ (qinfo)->rq.bah = prefix##_ARQBAH; \
+ (qinfo)->rq.bal = prefix##_ARQBAL; \
+ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
+ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
+} while (0)
+
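
For reference, the token pasting above means that ICE_CQ_INIT_REGS(cq, PF_FW), as used in ice_adminq_init_regs() below, expands to the same assignments that were previously written out by hand; a partial expansion:

	/* Partial expansion of ICE_CQ_INIT_REGS(cq, PF_FW) (illustrative): */
	(cq)->sq.head = PF_FW_ATQH;
	(cq)->sq.tail = PF_FW_ATQT;
	(cq)->sq.len = PF_FW_ATQLEN;
	(cq)->rq.head = PF_FW_ARQH;
	/* ... and so on for the remaining sq/rq fields, while
	 * ICE_CQ_INIT_REGS(cq, PF_MBX) produces the PF_MBX_* variants.
	 */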
/**
* ice_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- cq->sq.head = PF_FW_ATQH;
- cq->sq.tail = PF_FW_ATQT;
- cq->sq.len = PF_FW_ATQLEN;
- cq->sq.bah = PF_FW_ATQBAH;
- cq->sq.bal = PF_FW_ATQBAL;
- cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
-
- cq->rq.head = PF_FW_ARQH;
- cq->rq.tail = PF_FW_ARQT;
- cq->rq.len = PF_FW_ARQLEN;
- cq->rq.bah = PF_FW_ARQBAH;
- cq->rq.bal = PF_FW_ARQBAL;
- cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_FW);
}
/**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->mailboxq;
- /* set head and tail registers in our local struct */
- cq->sq.head = PF_MBX_ATQH;
- cq->sq.tail = PF_MBX_ATQT;
- cq->sq.len = PF_MBX_ATQLEN;
- cq->sq.bah = PF_MBX_ATQBAH;
- cq->sq.bal = PF_MBX_ATQBAL;
- cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
-
- cq->rq.head = PF_MBX_ARQH;
- cq->rq.tail = PF_MBX_ARQT;
- cq->rq.len = PF_MBX_ARQLEN;
- cq->rq.bah = PF_MBX_ARQBAH;
- cq->rq.bal = PF_MBX_ARQBAL;
- cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_MBX);
}
/**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
- * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * ice_free_cq_ring - Free control queue ring
* @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
+ * @ring: pointer to the specific control queue ring
*
- * This assumes the posted send buffers have already been cleaned
+ * This assumes the posted buffers have already been cleaned
* and de-allocated
*/
-static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
-}
-
-/**
- * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- */
-static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
- cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
- cq->rq.desc_buf.va = NULL;
- cq->rq.desc_buf.pa = 0;
- cq->rq.desc_buf.size = 0;
+ dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
+ ring->desc_buf.va, ring->desc_buf.pa);
+ ring->desc_buf.va = NULL;
+ ring->desc_buf.pa = 0;
+ ring->desc_buf.size = 0;
}
/**
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs:
return ICE_ERR_NO_MEMORY;
}
-/**
- * ice_free_rq_bufs - Free ARQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- int i;
-
- /* free descriptors */
- for (i = 0; i < cq->num_rq_entries; i++) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
- cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
- cq->rq.r.rq_bi[i].va = NULL;
- cq->rq.r.rq_bi[i].pa = 0;
- cq->rq.r.rq_bi[i].size = 0;
- }
-
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
-}
-
-/**
- * ice_free_sq_bufs - Free ATQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
- int i;
+ /* Clear Head and Tail */
+ wr32(hw, ring->head, 0);
+ wr32(hw, ring->tail, 0);
- /* only unmap if the address is non-NULL */
- for (i = 0; i < cq->num_sq_entries; i++)
- if (cq->sq.r.sq_bi[i].pa) {
- dmam_free_coherent(ice_hw_to_dev(hw),
- cq->sq.r.sq_bi[i].size,
- cq->sq.r.sq_bi[i].va,
- cq->sq.r.sq_bi[i].pa);
- cq->sq.r.sq_bi[i].va = NULL;
- cq->sq.r.sq_bi[i].pa = 0;
- cq->sq.r.sq_bi[i].size = 0;
- }
+ /* set starting point */
+ wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+ wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
+ wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
- /* free the buffer info list */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
+ /* Check one register to verify that config was applied */
+ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ return 0;
}
/**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->sq.head, 0);
- wr32(hw, cq->sq.tail, 0);
-
- /* set starting point */
- wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
- wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
- wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
-
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->sq.bal);
- if (reg != lower_32_bits(cq->sq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
- return 0;
+ return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
/**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->rq.head, 0);
- wr32(hw, cq->rq.tail, 0);
+ enum ice_status status;
- /* set starting point */
- wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
- wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
- wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+ status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+ if (status)
+ return status;
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->rq.bal);
- if (reg != lower_32_bits(cq->rq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
return 0;
}
@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_sq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_rq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size,\
+ (qi)->ring.r.ring##_bi[i].va,\
+ (qi)->ring.r.ring##_bi[i].pa);\
+ (qi)->ring.r.ring##_bi[i].va = NULL; \
+ (qi)->ring.r.ring##_bi[i].pa = 0; \
+ (qi)->ring.r.ring##_bi[i].size = 0; \
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free dma head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers and the ring itself */
- ice_free_sq_bufs(hw, cq);
- ice_free_ctrlq_sq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
+ ice_free_cq_ring(hw, &cq->sq);
shutdown_sq_out:
mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.count = 0;
/* free ring buffers and the ring itself */
- ice_free_rq_bufs(hw, cq);
- ice_free_ctrlq_rq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
+ ice_free_cq_ring(hw, &cq->rq);
shutdown_rq_out:
mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ init_ctrlq_free_rq:
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
- *
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
enum ice_status
@@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
+ * the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 648acdb4c644..3b6e387f5440 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev.
*/
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};
-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
PFGEN_STATE,
PRTGEN_STATUS,
QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* a private ethtool flag). This is due to the nature of the
* ethtool stats API.
*
- * User space programs such as ethtool must make 3 separate
+ * Userspace programs such as ethtool must make 3 separate
* ioctl requests, one for size, one for the strings, and
* finally one for the stats. Since these cross into
- * user space, changes to the number or size could result in
+ * userspace, changes to the number or size could result in
* undefined memory access or incorrect string<->value
* correlations for statistics.
*
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
- bool link_up;
pi = vsi->port_info;
- hw_link_info = &pi->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
- status = ice_aq_set_link_restart_an(pi, link_up, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}
/**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}
/**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
@@ -1603,7 +1603,7 @@ out:
}
/**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb1c510..5507928c8fbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S 0
+#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 7d2a66739e3f..bb51dd7defb5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -6,11 +6,11 @@
union ice_32byte_rx_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
*/
union ice_32b_rx_flex_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
/* Qword 0 */
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
+#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
/* RLAN Rx queue context data
*
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
- u8 dtype;
- u8 dsize;
- u8 crcstrip;
- u8 l2tsel;
- u8 hsplit_0;
- u8 hsplit_1;
- u8 showiv;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
- u8 tphrdesc_ena;
- u8 tphwdesc_ena;
- u8 tphdata_ena;
- u8 tphhead_ena;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
- u8 port_num;
+ u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
- u8 pf_num;
+ u8 pf_num;
u16 vmvf_num;
- u8 vmvf_type;
+ u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
- u8 tsyn_ena;
- u8 alt_vlan;
+ u8 tsyn_ena;
+ u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
- u8 wb_mode;
- u8 tphrd_desc;
- u8 tphrd;
- u8 tphwr_desc;
+ u8 wb_mode;
+ u8 tphrd_desc;
+ u8 tphrd;
+ u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
- u8 itr_notification_mode;
- u8 adjust_prof_id;
+ u8 itr_notification_mode;
+ u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
- u8 quanta_prof_idx;
- u8 tso_ena;
+ u8 quanta_prof_idx;
+ u8 tso_ena;
u16 tso_qnum;
- u8 legacy_int;
- u8 drop_ena;
- u8 cache_prof_idx;
- u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 legacy_int;
+ u8 drop_ena;
+ u8 cache_prof_idx;
+ u8 pkt_shaper_prof_idx;
+ u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2a7767..29b1dcfd4331 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
u16 pf_q;
int err;
- /* what is RX queue number in global space of 2K Rx queues */
+ /* what is Rx queue number in global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
- usleep_range(10, 20);
+ usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
+ u16 offset = 0, qmap = 0, tx_count = 0;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
+ u16 tx_numq_tc, rx_numq_tc;
+ u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
+ u8 netdev_tc = 0;
int i;
/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ if (!rx_numq_tc)
+ rx_numq_tc = 1;
+ tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!tx_numq_tc)
+ tx_numq_tc = 1;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
- qcount = numq_tc;
+ qcount_rx = rx_numq_tc;
+
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
+ qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
+ pow = order_base_2(qcount_rx);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
+ offset += qcount_rx;
+ tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
-
- vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
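
To make the per-TC queue arithmetic above concrete, consider a hypothetical PF VSI with 16 allocated Rx queues, 16 allocated Tx queues, two enabled TCs and an RSS size of 8: rx_numq_tc and tx_numq_tc are both 16 / 2 = 8, qcount_rx stays 8 after the RSS clamp, pow = order_base_2(8) = 3, TC0 covers Rx queues 0-7, TC1 covers 8-15, and num_txq ends up as 16. The stand-alone snippet below repeats the arithmetic with those illustrative values.

#include <stdio.h>

int main(void)
{
	unsigned int alloc_rxq = 16, alloc_txq = 16, numtc = 2, rss_size = 8;
	unsigned int rx_numq_tc = alloc_rxq / numtc;		/* 8 */
	unsigned int tx_numq_tc = alloc_txq / numtc;		/* 8 */
	unsigned int qcount_rx = rx_numq_tc < rss_size ? rx_numq_tc : rss_size;

	printf("per-TC Rx %u (TC offsets 0 and %u), total Tx %u\n",
	       qcount_rx, qcount_rx, tx_numq_tc * numtc);
	/* prints "per-TC Rx 8 (TC offsets 0 and 8), total Tx 16" */
	return 0;
}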
@@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -1039,7 +1051,7 @@ out:
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int i;
- /* Allocate tx_rings */
+ /* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
vsi->tx_rings[i] = ring;
}
- /* Allocate rx_rings */
+ /* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
+ u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
- u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
qg_buf->num_txqs = 1;
num_q_grps = 1;
- /* set up and configure the Tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
+ /* set up and configure the Tx queues for each enabled TC */
+ for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[q_idx];
+ ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+ pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+			/* init queue specific tail reg. It is referred to as
+ * transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[q_idx]->tail =
+ pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len,
+ NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[q_idx]->txq_teid =
+ le32_to_cpu(txq->q_teid);
- /* Add Tx Queue TEID into the VSI Tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
+ q_idx++;
+ }
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+ !vsi->tx_rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
+ /* set tc configuration */
+ ice_vsi_set_tc_cfg(vsi);
+
/* create the VSI */
ret = ice_vsi_init(vsi);
if (ret)
@@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
- /* if VSI type is not recognized, clean up the resources and
- * exit
- */
+ /* clean up the resources and exit */
goto unroll_vsi_init;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
- if ((start + needed) > res->num_entries)
+ if ((start + needed) > res->num_entries)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
@@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf;
int ret, i;
if (!vsi)
return -EINVAL;
+ pf = vsi->back;
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
+ ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_init(vsi);
@@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
break;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a1d595..e45e57499d91 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
+
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
- * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
+ * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
* indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
- /* only 1 rx queue unless RSS is enabled */
+ /* only 1 Rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_determine_q_usage(pf);
- pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
- hw->func_caps.guaranteed_num_vsi);
+ pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
-
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3138,8 +3139,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
@@ -3148,9 +3150,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
} else {
ice_vsi_close(vsi);
}
@@ -3189,7 +3195,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v]);
+ ice_dis_vsi(pf->vsi[v], false);
}
/**
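Note on the hunk above: the new `locked` argument lets ice_dis_vsi() be reached both from paths that already hold rtnl and from ones that do not, without taking rtnl_lock() recursively. A minimal sketch of that caller-provides-locking pattern, with foo_* names as placeholders (not the driver's code):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void foo_stop_netdev(struct net_device *netdev, bool locked)
{
	if (!locked)
		rtnl_lock();

	/* ndo_stop() must run under rtnl */
	netdev->netdev_ops->ndo_stop(netdev);

	if (!locked)
		rtnl_unlock();
}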
@@ -3668,7 +3674,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3691,40 +3697,36 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- u32 head, val = 0, i;
int hung_queue = -1;
+ u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
+ /* find the stopped queue the same way dev_watchdog() does */
for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
unsigned long trans_start;
+ struct netdev_queue *q;
q = netdev_get_tx_queue(netdev, i);
trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
+ trans_start + netdev->watchdog_timeo)) {
hung_queue = i;
break;
}
}
- if (i == netdev->num_tx_queues) {
+ if (i == netdev->num_tx_queues)
netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
+ else
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->q_index) {
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (hung_queue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
break;
}
- }
- }
- }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -3736,17 +3738,20 @@ static void ice_tx_timeout(struct net_device *netdev)
return;
if (tx_ring) {
- head = tx_ring->next_to_clean;
+ struct ice_hw *hw = &pf->hw;
+ u32 head, val = 0;
+
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(&pf->hw,
+ val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ tx_ring->vsi->hw_base_vector));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use,
- readl(tx_ring->tail), val);
+ head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
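The reworked ice_tx_timeout() above first scans the stack's Tx queues the same way dev_watchdog() does, and only then reads the hardware head from QTX_COMM_HEAD rather than trusting next_to_clean. A rough, driver-agnostic sketch of just the queue scan (illustrative only):

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Return the index of the first stopped, timed-out Tx queue, or -1. */
static int foo_find_hung_queue(struct net_device *netdev)
{
	unsigned int i;

	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q = netdev_get_tx_queue(netdev, i);
		unsigned long trans_start = q->trans_start;

		if (netif_xmit_stopped(q) &&
		    time_after(jiffies, trans_start + netdev->watchdog_timeo))
			return i;
	}

	return -1;
}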
@@ -3780,7 +3785,7 @@ static void ice_tx_timeout(struct net_device *netdev)
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
@@ -3813,7 +3818,7 @@ static int ice_open(struct net_device *netdev)
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state. The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
@@ -3842,14 +3847,14 @@ ice_features_check(struct sk_buff *skb,
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
- * being requested for this frame. We can rule out both by just
+ * being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
- * 64 bytes. If it is then we need to drop support for GSO.
+ * 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa18a22b..a1681853df2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
*
* Cleanup scheduling elements from SW DB
*/
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
{
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return;
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* This function removes the leaf node that was created by the FW
* during initialization
*/
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
* This function frees all the nodes except root and TC that were created by
* the FW during initialization
*/
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
* @num_nodes: pointer to num nodes array
*
* This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
* layers
*/
static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
}
/**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
@@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
@@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return status;
}
+
+/**
+ * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes single aggregator VSI info entry from
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw),
+ agg_vsi_info);
+ return;
+ }
+ }
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_vsi_ctx *vsi_ctx;
+ u8 i, j = 0;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return status;
+ mutex_lock(&pi->sched_lock);
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ goto exit_sched_rm_vsi_cfg;
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ struct ice_sched_node *vsi_node, *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, i);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner) {
+ ice_free_sched_node(pi, vsi_node->children[j]);
+
+ /* reset the counter again since the num
+ * children will be updated after node removal
+ */
+ j = 0;
+ } else {
+ j++;
+ }
+ }
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children) {
+ ice_free_sched_node(pi, vsi_node);
+ vsi_ctx->sched.vsi_node[i] = NULL;
+
+ /* clean up agg related vsi info if any */
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+ }
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[i] = 0;
+ }
+ status = 0;
+
+exit_sched_rm_vsi_cfg:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
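ice_rm_vsi_lan_cfg() is meant to be paired with the VSI teardown and rebuild paths shown in the ice_lib.c hunks earlier: the scheduler nodes are removed before the VSI itself is deleted or its vectors freed. A hedged sketch of that caller-side ordering only (error handling and the remaining teardown steps omitted):

/* Rough teardown ordering; not the driver's exact code. */
static void foo_release_vsi(struct ice_vsi *vsi)
{
	/* drop switch filters first, then the Tx scheduler nodes */
	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);

	/* only now tear down the VSI itself and its interrupt resources */
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);
}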
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa04c58..da5b4c166da8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_handle;
};
struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 027eba4e13f8..533b989a23e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 40c9c6558956..2e5693107fa4 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
*
* NOTE: *req_desc is both an input/output parameter.
* The caller of this function first calls this function with *request_desc set
- * to 0. If the response from f/w has *req_desc set to 0, all the switch
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
* configuration information has been returned; if non-zero (meaning not all
* the information was returned), the caller should call this function again
* with *req_desc set to the previous value returned by f/w to get the
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
/**
* ice_fill_sw_info - Helper function to populate lb_en and lan_en
* @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
*
* This helper function populates the lb_en and lan_en elements of the provided
* ice_fltr_info struct using the switch's type and characteristics of the
* switch rule being configured.
*/
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
- f_info->lb_en = false;
- f_info->lan_en = false;
- if ((f_info->flag & ICE_FLTR_TX) &&
- (f_info->fltr_act == ICE_FWD_TO_VSI ||
- f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
- f_info->fltr_act == ICE_FWD_TO_Q ||
- f_info->fltr_act == ICE_FWD_TO_QGRP)) {
- f_info->lb_en = true;
- if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
- f_info->lan_en = true;
+ fi->lb_en = false;
+ fi->lan_en = false;
+ if ((fi->flag & ICE_FLTR_TX) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ fi->lb_en = true;
+ /* Do not set lan_en to TRUE if
+ * 1. The switch is a VEB AND
+ * 2
+ * 2.1 The lookup is MAC with unicast addr for MAC, OR
+ * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ *
+ * In all other cases, the LAN enable has to be set to true.
+ */
+ if (!(hw->evb_veb &&
+ ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+ is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ fi->lan_en = true;
}
}
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
- * 2. Look up tx rx
+ * 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
- /* call the fill switch rule to fill the lookup tx rx structure */
+ /* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Call AQ command to add or update previously created VSI list with new VSI.
*
* Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the book keeping is described below :
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
* if only one VSI has been added till now
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
/* Update the previous switch rule to a new VSI list which
- * includes current VSI thats requested
+ * includes current VSI that is requested
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbabbb41e..49fc38094185 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
/**
* ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
/**
* ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
rx_ring->next_to_alloc = val;
/* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
+ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
/**
* ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware
*
* This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
* ice_pull_tail - ice specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
- * This function is an ice specific version of __pskb_pull_tail. The
+ * This function is an ice specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
@@ -904,7 +904,7 @@ checksum_fail:
/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
* @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
/**
* ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
*
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
- * processing. The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
/* helper function for building cmd/type/offset */
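The ice, igb, igbvf, igc and ixgbevf poll routines in this series all move to the same completion idiom: re-arm interrupts only when napi_complete_done() reports that NAPI really left polling (it returns false while busy polling owns the context), and never report the full budget once the work is complete. A generic sketch with foo_* placeholders for the driver-specific pieces:

#include <linux/netdevice.h>

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);	/* hypothetical */

	/* budget exhausted: stay in polling mode */
	if (work_done == budget)
		return budget;

	/* napi_complete_done() returns false while the socket busy-poll
	 * loop owns this NAPI context; interrupts must stay masked then.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		foo_enable_irq(napi);			/* hypothetical */

	/* never return the full budget when polling is complete */
	return min(work_done, budget - 1);
}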
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
}
/**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
+
return __ice_maybe_stop_tx(tx_ring, size);
}
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* Finally, we add one to round up. Because 256 isn't an exact multiple of
* 3, we'll underestimate near each multiple of 12K. This is actually more
* accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment. For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
}
/**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
nr_frags -= ICE_MAX_BUF_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
- /* Initialize size to the negative value of gso_size minus 1. We
+ /* Initialize size to the negative value of gso_size minus 1. We
* use this as the worst case scenerio in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81c1988..0ea428104215 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
+ u32 valid_functions;
+
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
- u32 guaranteed_num_vsi;
+ u32 guar_num_vsi;
};
/* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f9d391..05ff4f910649 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
-/***********************enable_vf routines*****************************/
-
/**
* ice_dis_vf_mappings
* @vf: pointer to the VF structure
@@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
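ice_free_vfs() now disables SR-IOV before stopping and reclaiming the VF resources, so host-side VF drivers can detach cleanly, and it still refuses to disable SR-IOV while any VF is assigned to a guest. A sketch of that guard using only core PCI helpers (foo_free_vf_resources() is a placeholder):

#include <linux/pci.h>

static void foo_disable_sriov(struct pci_dev *pdev)
{
	if (pci_vfs_assigned(pdev))
		dev_warn(&pdev->dev,
			 "VFs are assigned - not disabling SR-IOV\n");
	else
		pci_disable_sriov(pdev);

	/* VF resources are reclaimed only after the disable attempt */
	foo_free_vf_resources(pdev);
}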
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We don't want to reconfigure interrupts since AVF driver doesn't
+ * We dont want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
* ice_process_vflr_event - Free VF resources via IRQ calls
* @pf: pointer to the PF structure
*
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
* free up VF resources and state variables
*/
void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
- /* copy Rx queue info from VF into vsi */
+ /* copy Rx queue info from VF into VSI */
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
*
- * add guest mac address filter
+ * add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* @msg: pointer to the msg buffer
*
* VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to VF.
+ * available queue pairs via virtchnl message response to vf.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
+ "VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 10131e0180f9..01470a8ee03a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,7 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
- u8 num_req_qs; /* num of queue pairs requested by VF */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 5acf3b743876..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5df88ad8ac81..453ae1d9e5f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
- idleslope %d sendslope %d hiCredit %d \
- locredit %d\n",
- (ring->cbs_enable) ? "enabled" : "disabled",
- (ring->launchtime_enable) ? "enabled" : "disabled", queue,
- ring->idleslope, ring->sendslope, ring->hicredit,
- ring->locredit);
+ netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ ring->cbs_enable ? "enabled" : "disabled",
+ ring->launchtime_enable ? "enabled" : "disabled",
+ queue,
+ ring->idleslope, ring->sendslope,
+ ring->hicredit, ring->locredit);
}
static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -6019,6 +6018,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -6147,8 +6148,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
- skb_tx_timestamp(skb);
-
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
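igb, igc, ixgbe and ixgbevf all move skb_tx_timestamp() out of the xmit entry point and into the *_tx_map() routine, before the write barrier and tail bump: taking the software timestamp any later would race with the hardware completing and freeing the skb. A minimal sketch of the intended ordering (ring layout and descriptor setup are placeholders):

#include <linux/skbuff.h>
#include <linux/io.h>

struct foo_ring {
	void __iomem *tail;
	u16 next_to_use;
};

static void foo_tx_map(struct foo_ring *ring, struct sk_buff *skb)
{
	/* ... fill Tx descriptors for skb here ... */

	/* software timestamp must be taken before hardware can see the
	 * descriptors; after the doorbell the skb may be freed at any time
	 */
	skb_tx_timestamp(skb);

	wmb();	/* make descriptor writes visible before the tail update */
	writel(ring->next_to_use, ring->tail);
}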
@@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igb_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igb_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2b95dc9c7a6a..fd3071f55bd3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
+ u32 lo, hi;
u64 ns;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- ns = timecounter_read(&igb->tc);
+ ptp_read_system_prets(sts);
+ lo = rd32(E1000_SYSTIML);
+ ptp_read_system_postts(sts);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ unsigned long flags;
+ u32 lo, hi;
+ u64 ns;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ lo = rd32(E1000_SYSTIML);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- igb_ptp_read_i210(igb, ts);
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(E1000_SYSTIML);
+ ts->tv_sec = rd32(E1000_SYSTIMH);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
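These igb hunks (and the ixgbe ones further down) switch the clock from .gettime64 to the newer .gettimex64 callback: the device time register read is bracketed with ptp_read_system_prets()/ptp_read_system_postts() so the PTP core can hand userspace paired system/PHC timestamps via PTP_SYS_OFFSET_EXTENDED. A generic sketch of such a callback for a timecounter-based driver; struct foo_adapter and foo_read_systim() are assumptions:

#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>

static int foo_ptp_gettimex(struct ptp_clock_info *ptp,
			    struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	struct foo_adapter *priv = container_of(ptp, struct foo_adapter,
						ptp_caps);
	unsigned long flags;
	u64 cycles, ns;

	spin_lock_irqsave(&priv->tmreg_lock, flags);

	/* snapshot system time immediately around the device read */
	ptp_read_system_prets(sts);
	cycles = foo_read_systim(priv);		/* hypothetical register read */
	ptp_read_system_postts(sts);

	ns = timecounter_cyc2time(&priv->tc, cycles);

	spin_unlock_irqrestore(&priv->tmreg_lock, flags);

	*ts = ns_to_timespec64(ns);
	return 0;
}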
@@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
+ u64 ns;
- igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(ns);
pr_debug("igb overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
@@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
@@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210;
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e5838f7c2..a3cd7ac48d4b 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
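Replacing WARN_ON_ONCE(!spin_is_locked(...)) with lockdep_assert_held() documents the locking contract and checks the actual lock owner when lockdep is enabled, whereas spin_is_locked() is always false on uniprocessor builds. Illustrative use with placeholder types:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void foo_write_mbx(struct foo_hw *hw)
{
	/* caller must hold hw->mbx_lock; verified only under lockdep */
	lockdep_assert_held(&hw->mbx_lock);

	/* ... write mailbox registers ... */
}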
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 820d49eb41ab..4eab83faec62 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
igbvf_clean_rx_irq(adapter, &work_done, budget);
- /* If not enough Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done == budget)
+ return budget;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index cdf18a5d9e08..b1039dd3dd13 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -5,23 +5,12 @@
#define _IGC_H_
#include <linux/kobject.h>
-
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-
#include <linux/ethtool.h>
-
#include <linux/sctp.h>
-#define IGC_ERR(args...) pr_err("igc: " args)
-
-#define PFX "igc: "
-
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-
#include "igc_hw.h"
/* main */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 832da609d9a7..df40af759542 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- u32 ctrl_ext;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
@@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
- ctrl_ext = rd32(IGC_CTRL_EXT);
-
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
@@ -287,8 +284,6 @@ out:
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
- u32 link_mode = 0;
- u32 ctrl_ext = 0;
s32 ret_val = 0;
switch (hw->device_id) {
@@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
hw->phy.media_type = igc_media_type_copper;
- ctrl_ext = rd32(IGC_CTRL_EXT);
- link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
-
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9d85707e8a81..f20183037fb2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
/* update pointers within the skb to store the data */
skb_reserve(skb, IGC_SKB_PAD);
- __skb_put(skb, size);
+ __skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
+ rx_buffer->page_offset ^= truesize;
#else
- rx_buffer->page_offset += truesize;
+ rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
@@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
tx_buffer->next_to_watch,
jiffies,
tx_buffer->next_to_watch->wb.status);
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
/* we are about to reset, no point in enabling stuff */
return true;
@@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/**
- * igc_ioctl - I/O control method
- * @netdev: network interface device structure
- * @ifreq: frequency
- * @cmd: command
- */
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
@@ -2866,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igc_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -3358,7 +3346,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
goto err_set_queues;
@@ -3445,7 +3433,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
- .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -3532,26 +3519,23 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- IGC_ERR("Wrong DMA configuration, aborting\n");
+ dev_err(&pdev->dev, "igc: Wrong DMA config\n");
goto err_dma;
}
}
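igc_probe() drops the unused pci_using_dac bookkeeping but keeps the usual try-64-bit-then-fall-back-to-32-bit DMA mask setup. The same result can be expressed more compactly with dma_set_mask_and_coherent(); a hedged equivalent (the driver itself open-codes the fallback):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "No usable DMA configuration\n");

	return err;
}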
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 732b1e6ecc43..acba067cc15a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_FILTER))
return -EOPNOTSUPP;
if (ixgbe_wol_exclusion(adapter, wol))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index fd1b0546fd67..4d77f42e035c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
+#include <linux/if_bridge.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
- if (adapter->num_vfs)
+ if (adapter->num_vfs &&
+ adapter->bridge_mode != BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
/* find the first unused index */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 113b38e0defb..49a4ea38eb07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6077,9 +6077,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Disable Rx */
ixgbe_disable_rx(adapter);
- /* synchronize_sched() needed for pending XDP buffers to drain */
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
- synchronize_sched();
+ synchronize_rcu();
ixgbe_irq_disable(adapter);
@@ -8269,6 +8269,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/*
* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
@@ -8646,8 +8648,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -10476,7 +10476,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
ixgbe_disable_rxr_hw(adapter, rx_ring);
if (xdp_ring)
- synchronize_sched();
+ synchronize_rcu();
/* Rx/Tx/XDP Tx share the same napi context. */
napi_disable(&rx_ring->q_vector->napi);
@@ -10517,7 +10517,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
ixgbe_configure_rx_ring(adapter, rx_ring);
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
- clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index b3e0d8bb5cbd..d81a50dc9535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * ixgbe_ptp_gettime
+ * ixgbe_ptp_gettimex
* @ptp: the ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
*
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
*/
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
- u64 ns;
+ u64 ns, stamp;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->hw_tc);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ /* Upper 32 bits represent billions of cycles, lower 32 bits
+ * represent cycles. However, we use timespec64_to_ns for the
+ * correct math even though the units haven't been corrected
+ * yet.
+ */
+ ptp_read_system_prets(sts);
+ IXGBE_READ_REG(hw, IXGBE_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH);
+ stamp = timespec64_to_ns(ts);
+ break;
+ default:
+ ptp_read_system_prets(sts);
+ stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+ break;
+ }
+
+ ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
IXGBE_OVERFLOW_PERIOD);
- struct timespec64 ts;
+ unsigned long flags;
if (timeout) {
- ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+ /* Update the timecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_read(&adapter->hw_tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
adapter->last_overflow_check = jiffies;
}
}
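Both igb and ixgbe stop calling their gettime routine from the overflow worker and instead take the timecounter lock and call timecounter_read() directly: the periodic check only needs to advance the timecounter well within the cycle-counter wrap period. A generic sketch; FOO_OVERFLOW_PERIOD and struct foo_adapter are illustrative:

#include <linux/timecounter.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

static void foo_overflow_check(struct foo_adapter *adapter)
{
	unsigned long flags;

	if (time_is_after_jiffies(adapter->last_overflow_check +
				  FOO_OVERFLOW_PERIOD))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_read(&adapter->tc);	/* advances the cached ns time */
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	adapter->last_overflow_check = jiffies;
}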
@@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
@@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
@@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e47ede7e832..2de81f046fb5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting == 1)
- ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
- !test_bit(__IXGBEVF_REMOVING, &adapter->state))
- ixgbevf_irq_enable_queues(adapter,
- BIT(q_vector->v_idx));
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting == 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ BIT(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
}
/**
@@ -4016,6 +4020,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1e9bcbdc6a90..2f427271a793 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- u32 supported, advertising;
phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- supported &= ~SUPPORTED_1000baseT_Half;
- advertising &= ~ADVERTISED_1000baseT_Half;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising);
return 0;
}
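mv643xx_eth now manipulates the ethtool link-mode bitmaps directly instead of round-tripping through the legacy u32 masks. A minimal sketch of the replacement helpers, illustrative only:

#include <linux/linkmode.h>
#include <linux/ethtool.h>

/* Drop a mode the MAC cannot do from both supported and advertised sets. */
static void foo_drop_1000half(struct ethtool_link_ksettings *cmd)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.advertising);
}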
@@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ linkmode_copy(phy->advertising, phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phy->advertising);
} else {
phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
+ linkmode_zero(phy->advertising);
phy->speed = speed;
phy->duplex = duplex;
}
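
The mv643xx_eth hunks rely on phylib's supported/advertising masks now being linkmode bitmaps, so individual modes are set and cleared with linkmode_* helpers instead of round-tripping through the legacy u32 encodings. A small sketch of the same operations on a phy_device, assuming the post-conversion bitmap fields:

#include <linux/phy.h>
#include <linux/linkmode.h>

static void example_strip_1000half(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv);

	/* Advertise everything the PHY supports plus autoneg ... */
	linkmode_copy(adv, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
	/* ... except the mode the MAC cannot handle. */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, adv);

	linkmode_copy(phydev->advertising, adv);
}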
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e5397c8197b9..46a0f6b45d84 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4248,8 +4248,7 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register. */
- if (eee->tx_lpi_enabled &&
- (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 12db256c8c9f..4c94571e03eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -31,6 +31,7 @@
* @resp: command response
* @link_info: link related information
* @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @cgx: parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct cgx_event_cb event_cb;
+ spinlock_t event_cb_lock;
bool cmd_pend;
struct cgx *cgx;
u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
};
@@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
return cgx->lmac_idmap[lmac_id];
}
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
{
struct cgx *cgx_dev;
- int count = 0;
+ int idmax = -ENODEV;
list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
- count++;
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
+
+ if (idmax < 0)
+ return 0;
- return count;
+ return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ /* Ensure callback doesn't get unregistered until we finish it */
+ spin_lock(&lmac->event_cb_lock);
+
if (!lmac->event_cb.notify_link_chg) {
dev_dbg(dev, "cgx port %d:%d Link change handler null",
cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
cgx->cgx_id, lmac->lmac_id,
linfo->link_up ? "UP" : "DOWN", linfo->speed);
- return;
+ goto err;
}
if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
dev_err(dev, "event notification failure\n");
+err:
+ spin_unlock(&lmac->event_cb_lock);
}
static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -548,6 +564,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+ struct lmac *lmac;
+ unsigned long flags;
+ struct cgx *cgx = cgxd;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lmac->event_cb_lock, flags);
+ lmac->event_cb.notify_link_chg = NULL;
+ lmac->event_cb.data = NULL;
+ spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable)
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+ else
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
u64 req = 0;
@@ -581,6 +629,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
return 0;
}
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the lmacs */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
@@ -602,6 +678,7 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
+ spin_lock_init(&lmac->event_cb_lock);
err = request_irq(pci_irq_vector(cgx->pdev,
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +701,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
struct lmac *lmac;
int i;
+ if (cgx->cgx_cmd_workq) {
+ flush_workqueue(cgx->cgx_cmd_workq);
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
lmac = cgx->lmac_idmap[i];
@@ -679,8 +762,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+ dev_err(dev, "alloc workqueue failed for cgx cmd");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
list_add(&cgx->cgx_list, &cgx_list);
- cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
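
The cgx.c changes add an unregister path for the link-change callback (serialized against the handler by event_cb_lock) and a workqueue that asks firmware to bring LMAC links up after probe. A hedged sketch of how a consumer might use these APIs, with my_link_notify()/my_attach()/my_detach() as hypothetical names and the callback prototype assumed from struct cgx_event_cb:

static int my_link_notify(struct cgx_link_event *event, void *data)
{
	/* Runs from cgx_link_change_handler() under lmac->event_cb_lock. */
	return 0;
}

static int my_attach(void *cgxd, int lmac_id)
{
	struct cgx_event_cb cb = {
		.notify_link_chg = my_link_notify,
		.data = NULL,
	};
	int err;

	err = cgx_lmac_evh_register(&cb, cgxd, lmac_id);
	if (err)
		return err;

	/* Ask firmware to bring the LMAC links up asynchronously. */
	return cgx_lmac_linkup_start(cgxd);
}

static void my_detach(void *cgxd, int lmac_id)
{
	/* Takes event_cb_lock, so an in-flight handler finishes first. */
	cgx_lmac_evh_unregister(cgxd, lmac_id);
}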
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0a66d2717442..8c2be8493321 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -20,40 +20,41 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
-#define MAX_CGX 3
+#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
-#define CMR_EN BIT_ULL(55)
-#define DATA_PKT_TX_EN BIT_ULL(53)
-#define DATA_PKT_RX_EN BIT_ULL(54)
-#define CGX_LMAC_TYPE_SHIFT 40
-#define CGX_LMAC_TYPE_MASK 0xF
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
-#define FW_CGX_INT BIT_ULL(1)
+#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
-#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
-#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
-#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
-#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 0x200
-#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
-#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
-#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
-#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
@@ -94,11 +95,12 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -108,4 +110,5 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
#endif /* CGX_H */
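
Because cgx_get_cgxcnt_max() now returns the highest CGX id plus one rather than a device count, the id space may contain holes and callers have to tolerate cgx_get_pdata() returning NULL. A short hedged sketch of such a walk:

static void example_walk_cgx(void)
{
	int cgx_id, max = cgx_get_cgxcnt_max();

	for (cgx_id = 0; cgx_id < max; cgx_id++) {
		void *cgxd = cgx_get_pdata(cgx_id);

		if (!cgxd)
			continue;	/* no CGX device with this id */
		/* ... e.g. cgx_get_lmac_cnt(cgxd) ... */
	}
}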
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fa17af3f4ba7..2d9fe51c6616 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,8 +78,6 @@ enum cgx_cmd_id {
CGX_CMD_LINK_STATE_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
- CGX_CMD_IRQ_ENABLE,
- CGX_CMD_IRQ_DISABLE,
};
/* async event ids */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d39ada404c8f..9bddb032dd7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,11 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
#define NIX_RX_ACTIONOP_UCAST (0x1ull)
@@ -169,7 +174,9 @@ enum nix_scheduler {
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
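
The new common.h defines add loopback link/channel numbering and hardware frame-size limits. A small illustrative sketch (helper names are hypothetical) of how they might be used:

#include <linux/kernel.h>

static u16 example_lbk_chan(int lbk, int chan)
{
	return NIX_CHAN_LBK_CHX(lbk, chan);	/* 0x100 * lbk + chan */
}

static u16 example_clamp_frs(u16 maxlen)
{
	/* Keep the configured frame size within the supported range. */
	return clamp_t(u16, maxlen, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
}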
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 85ba24a05774..d6f9ed8ea966 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty);
const char *otx2_mbox_id2name(u16 id)
{
switch (id) {
-#define M(_name, _id, _1, _2) case _id: return # _name;
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a15a59c9a239..292c13a901df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -120,54 +120,93 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
#define MBOX_MESSAGES \
/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
-M(READY, 0x001, msg_req, ready_msg_rsp) \
-M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
-M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
-M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
-M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
-M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
-M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
-M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
-M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
-M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
-M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
-M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
-M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
-M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
-M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
-M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
-M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+ npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ msg_req, npc_get_kex_cfg_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
-M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
-M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
-M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
-M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
-M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
-M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
-M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
-M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+ nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+ hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, msg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
enum {
-#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
@@ -191,6 +230,13 @@ struct msg_rsp {
struct mbox_msghdr hdr;
};
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
struct ready_msg_rsp {
struct mbox_msghdr hdr;
u16 sclk_feq; /* SCLK frequency */
@@ -347,6 +393,8 @@ struct hwctx_disable_req {
u8 ctype;
};
+/* NIX mbox message formats */
+
/* NIX mailbox error codes
* Range 401 - 500.
*/
@@ -365,6 +413,8 @@ enum nix_af_status {
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
};
/* For NIX LF context alloc and init */
@@ -392,6 +442,10 @@ struct nix_lf_alloc_rsp {
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 mac_addr[ETH_ALEN];
+ u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ u16 cints; /* NIX_AF_CONST2::CINTS */
+ u16 qints; /* NIX_AF_CONST2::QINTS */
};
/* NIX AQ enqueue msg */
@@ -472,6 +526,7 @@ struct nix_txschq_config {
struct nix_vtag_config {
struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
u8 vtag_size;
/* cfg_type is '0' for tx vlan cfg
* cfg_type is '1' for rx vlan cfg
@@ -492,7 +547,7 @@ struct nix_vtag_config {
/* valid when cfg_type is '1' */
struct {
- /* rx vtag type index */
+ /* rx vtag type index, valid values are in 0..7 range */
u8 vtag_type;
/* rx vtag strip */
u8 strip_vtag :1;
@@ -522,4 +577,159 @@ struct nix_rx_mode {
u16 mode;
};
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ u8 update_smq; /* Update SMQ's min/max lens */
+ u8 update_minlen; /* Set minlen also */
+ u8 sdp_link; /* Set SDP RX link */
+ u16 maxlen;
+ u16 minlen;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u16 ref_entry;
+ u16 count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry allocated or start index if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of entries allocated */
+ u16 free_count; /* Number of entries available */
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry index to be freed */
+ u8 all; /* Free all entries allocated to this PFVF */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 entry; /* MCAM entry to write this match key */
+ u16 cntr; /* Counter for this MCAM entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Counter allocated or start index if contiguous.
+ * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of counters allocated */
+ u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr;
+ u16 entry; /* Entry and counter to be unmapped */
+ u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 ref_entry;
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u8 intf; /* Rx or Tx interface */
+ u8 enable_entry;/* Enable this MCAM entry ? */
+ u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+};
+
#endif /* MBOX_H */
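
mbox.h adds a third column to the MBOX_MESSAGES X-macro so each message carries a lowercase function-name token in addition to its id and request/response types; different expansions of the same table then pick out only the columns they need. A self-contained sketch of the pattern (MY_MSGS and the helper are illustrative only; unused columns are simply ignored tokens):

#include <linux/types.h>

/* One table, several expansions. */
#define MY_MSGS							\
M(READY,  0x001, ready,  msg_req, ready_msg_rsp)		\
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp)

enum {				/* expansion 1: numeric ids */
#define M(_name, _id, _fn, _req, _rsp)	MY_MSG_ ## _name = _id,
MY_MSGS
#undef M
};

static const char *my_id2name(u16 id)
{
	switch (id) {		/* expansion 2: id-to-name strings */
#define M(_name, _id, _fn, _req, _rsp)	case _id: return #_name;
MY_MSGS
#undef M
	default:
		return "INVALID";
	}
}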
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f98b0113def3..a7a20afb3ba0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -259,4 +259,10 @@ struct nix_rx_action {
#endif
};
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT BIT_ULL(15)
+#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+
#endif /* NPC_H */
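
The VTAG0_* masks describe fields of the NIX RX vtag action word. A hedged sketch of composing that word with FIELD_PREP (field values are illustrative; assumes npc.h for the masks):

#include <linux/bitfield.h>
#include "npc.h"

static u64 example_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	return VTAG0_VALID_BIT |
	       FIELD_PREP(VTAG0_TYPE_MASK, type) |
	       FIELD_PREP(VTAG0_LID_MASK, lid) |
	       FIELD_PREP(VTAG0_RELPTR_MASK, relptr);
}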
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index dc28fa2b9481..4d061d971956 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *));
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -153,17 +163,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
u16 match = 0;
int lf;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lf = 0; lf < block->lf.max; lf++) {
if (block->fn_map[lf] == pcifunc) {
if (slot == match) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return lf;
}
match++;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return -ENODEV;
}
@@ -337,6 +347,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
return &rvu->pf[rvu_get_pf(pcifunc)];
}
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+ int pf, vf, nvfs;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (pf >= rvu->hw->total_pfs)
+ return false;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ /* Check if VF is within number of VFs attached to this PF */
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ nvfs = (cfg >> 12) & 0xFF;
+ if (vf >= nvfs)
+ return false;
+
+ return true;
+}
+
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
struct rvu_block *block;
@@ -597,6 +629,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+
+ mutex_destroy(&rvu->rsrc_lock);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +786,7 @@ init:
if (!rvu->hwvf)
return -ENOMEM;
- spin_lock_init(&rvu->rsrc_lock);
+ mutex_init(&rvu->rsrc_lock);
err = rvu_setup_msix_resources(rvu);
if (err)
@@ -777,17 +811,26 @@ init:
err = rvu_npc_init(rvu);
if (err)
- return err;
+ goto exit;
+
+ err = rvu_cgx_init(rvu);
+ if (err)
+ goto exit;
err = rvu_npa_init(rvu);
if (err)
- return err;
+ goto cgx_err;
err = rvu_nix_init(rvu);
if (err)
- return err;
+ goto cgx_err;
return 0;
+
+cgx_err:
+ rvu_cgx_exit(rvu);
+exit:
+ return err;
}
/* NPA and NIX admin queue APIs */
@@ -830,7 +873,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
return 0;
@@ -858,6 +901,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
return 0;
}
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf;
+
+ if (!is_pf_func_valid(rvu, pcifunc))
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Check if this PFFUNC has a LF of type blktype attached */
+ if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ return false;
+
+ return true;
+}
+
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
int pcifunc, int slot)
{
@@ -926,7 +985,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
struct rvu_block *block;
int blkid;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check for partial resource detach */
if (detach && detach->partial)
@@ -956,11 +1015,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
rvu_detach_block(rvu, pcifunc, block->type);
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return 0;
}
-static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
struct rsrc_detach *detach,
struct msg_rsp *rsp)
{
@@ -1108,7 +1167,7 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
{
@@ -1119,7 +1178,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
if (!attach->modify)
rvu_detach_rsrcs(rvu, NULL, pcifunc);
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1222,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
}
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return err;
}
@@ -1231,7 +1290,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1280,22 +1339,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 vf, numvfs;
+ u64 cfg;
+
+ vf = pcifunc & RVU_PFVF_FUNC_MASK;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ numvfs = (cfg >> 12) & 0xFF;
+
+ if (vf && vf <= numvfs)
+ __rvu_flr_handler(rvu, pcifunc);
+ else
+ return RVU_INVALID_VF_ID;
+
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
+ struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
/* Check if valid, if not reply with a invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG)
goto bad_message;
switch (req->id) {
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
case _id: { \
struct _rsp_type *rsp; \
int err; \
\
rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
- &rvu->mbox, devid, \
+ mbox, devid, \
sizeof(struct _rsp_type)); \
+ /* some handlers should complete even if reply */ \
+ /* could not be allocated */ \
+ if (!rsp && \
+ _id != MBOX_MSG_DETACH_RESOURCES && \
+ _id != MBOX_MSG_NIX_TXSCH_FREE && \
+ _id != MBOX_MSG_VF_FLR) \
+ return -ENOMEM; \
if (rsp) { \
rsp->hdr.id = _id; \
rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
@@ -1303,9 +1391,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
rsp->hdr.rc = 0; \
} \
\
- err = rvu_mbox_handler_ ## _name(rvu, \
- (struct _req_type *)req, \
- rsp); \
+ err = rvu_mbox_handler_ ## _fn_name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
if (rsp && err) \
rsp->hdr.rc = err; \
\
@@ -1313,29 +1401,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
}
MBOX_MESSAGES
#undef M
- break;
+
bad_message:
default:
- otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
- req->id);
+ otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
return -ENODEV;
}
}
-static void rvu_mbox_handler(struct work_struct *work)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
- struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *req_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id, err;
- u16 pf;
- mbox = &rvu->mbox;
- pf = mwork - rvu->mbox_wrk;
- mdev = &mbox->dev[pf];
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk;
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[devid];
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
@@ -1347,10 +1444,21 @@ static void rvu_mbox_handler(struct work_struct *work)
for (id = 0; id < req_hdr->num_msgs; id++) {
msg = mdev->mbase + offset;
- /* Set which PF sent this message based on mbox IRQ */
- msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
- err = rvu_process_mbox_msg(rvu, pf, msg);
+ /* Set which PF/VF sent this message based on mbox IRQ */
+ switch (type) {
+ case TYPE_AFPF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ break;
+ case TYPE_AFVF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+ break;
+ }
+
+ err = rvu_process_mbox_msg(mbox, devid, msg);
if (!err) {
offset = mbox->rx_start + msg->next_msgoff;
continue;
@@ -1358,31 +1466,57 @@ static void rvu_mbox_handler(struct work_struct *work)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid,
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid);
}
- /* Send mbox responses to PF */
- otx2_mbox_msg_send(mbox, pf);
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFPF);
}
-static void rvu_mbox_up_handler(struct work_struct *work)
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFVF);
+}
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
struct rvu *rvu = mwork->rvu;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id;
- u16 pf;
+ int offset, id, devid;
- mbox = &rvu->mbox_up;
- pf = mwork - rvu->mbox_wrk_up;
- mdev = &mbox->dev[pf];
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk_up;
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
@@ -1423,128 +1557,182 @@ end:
mdev->msgs_acked++;
}
- otx2_mbox_reset(mbox, 0);
+ otx2_mbox_reset(mbox, devid);
}
-static int rvu_mbox_init(struct rvu *rvu)
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
- struct rvu_hwinfo *hw = rvu->hw;
- void __iomem *hwbase = NULL;
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *))
+{
+ void __iomem *hwbase = NULL, *reg_base;
+ int err, i, dir, dir_up;
struct rvu_work *mwork;
+ const char *name;
u64 bar4_addr;
- int err, pf;
- rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- hw->total_pfs);
- if (!rvu->mbox_wq)
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+ break;
+ case TYPE_AFVF:
+ name = "rvu_afvf_mailbox";
+ bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mw->mbox_wq = alloc_workqueue(name,
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num);
+ if (!mw->mbox_wq)
return -ENOMEM;
- rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk) {
+ mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk) {
err = -ENOMEM;
goto exit;
}
- rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk_up) {
+ mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk_up) {
err = -ENOMEM;
goto exit;
}
- /* Map mbox region shared with PFs */
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
/* Mailbox is a reserved memory (in RAM) region shared between
* RVU devices, shouldn't be mapped as device memory to allow
* unaligned accesses.
*/
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
if (!hwbase) {
dev_err(rvu->dev, "Unable to map mailbox region\n");
err = -ENOMEM;
goto exit;
}
- err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF_UP, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk[pf];
+ for (i = 0; i < num; i++) {
+ mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_handler);
- }
+ INIT_WORK(&mwork->work, mbox_handler);
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk_up[pf];
+ mwork = &mw->mbox_wrk_up[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ INIT_WORK(&mwork->work, mbox_up_handler);
}
return 0;
exit:
if (hwbase)
iounmap((void __iomem *)hwbase);
- destroy_workqueue(rvu->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
return err;
}
-static void rvu_mbox_destroy(struct rvu *rvu)
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
- if (rvu->mbox_wq) {
- flush_workqueue(rvu->mbox_wq);
- destroy_workqueue(rvu->mbox_wq);
- rvu->mbox_wq = NULL;
+ if (mw->mbox_wq) {
+ flush_workqueue(mw->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
+ mw->mbox_wq = NULL;
}
- if (rvu->mbox.hwbase)
- iounmap((void __iomem *)rvu->mbox.hwbase);
+ if (mw->mbox.hwbase)
+ iounmap((void __iomem *)mw->mbox.hwbase);
- otx2_mbox_destroy(&rvu->mbox);
- otx2_mbox_destroy(&rvu->mbox_up);
+ otx2_mbox_destroy(&mw->mbox);
+ otx2_mbox_destroy(&mw->mbox_up);
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
- struct rvu *rvu = (struct rvu *)rvu_irq;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+ /* interrupt bits are numbered from 0 relative to 'first' */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
u64 intr;
- u8 pf;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
/* Sync with mbox memory region */
- smp_wmb();
+ rmb();
- for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- if (intr & (1ULL << pf)) {
- mbox = &rvu->mbox;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk[pf].work);
- mbox = &rvu->mbox_up;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk_up[pf].work);
- }
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+ vfs -= 64;
}
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
return IRQ_HANDLED;
}
@@ -1561,6 +1749,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int err;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return;
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+
+ /* Cleanup LF and reset it */
+ if (block->addr == BLKADDR_NIX0)
+ rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+ else if (block->addr == BLKADDR_NPA)
+ rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+ err = rvu_lf_reset(rvu, block, lf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, lf);
+ }
+ }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ mutex_lock(&rvu->flr_lock);
+ /* Reset order should reflect inter-block dependencies:
+ * 1. Reset any packet/work sources (NIX, CPT, TIM)
+ * 2. Flush and reset SSO/SSOW
+ * 3. Cleanup pools (NPA)
+ */
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+ int reg = 0;
+
+ /* pcifunc = 0(PF0) | (vf + 1) */
+ __rvu_flr_handler(rvu, vf + 1);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+
+ /* Signal FLR finish and enable IRQ */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+ struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = flrwork->rvu;
+ u16 pcifunc, numvfs, vf;
+ u64 cfg;
+ int pf;
+
+ pf = flrwork - rvu->flr_wrk;
+ if (pf >= rvu->hw->total_pfs) {
+ rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+ return;
+ }
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
+ for (vf = 0; vf < numvfs; vf++)
+ __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+ __rvu_flr_handler(rvu, pcifunc);
+
+ /* Signal FLR finish */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+ /* Enable interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+ int dev, vf, reg = 0;
+ u64 intr;
+
+ if (start_vf >= 64)
+ reg = 1;
+
+ intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ return;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+ /* Clear and disable the interrupt */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+ }
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+ if (!intr)
+ goto afvf_flr;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* PF is already dead, do only AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+ BIT_ULL(pf));
+ /* Disable the interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ BIT_ULL(pf));
+ }
+ }
+
+afvf_flr:
+ rvu_afvf_queue_flr_work(rvu, 0, 64);
+ if (rvu->vfs > 64)
+ rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ /* clear the trpend due to ME(master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
@@ -1569,6 +1967,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ /* Disable the PF FLR interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
for (irq = 0; irq < rvu->num_vec; irq++) {
if (rvu->irq_allocated[irq])
free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1578,9 +1984,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu->num_vec = 0;
}
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[0];
+ int offset;
+
+ pfvf = &rvu->pf[0];
+ offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Make sure there are enough MSIX vectors configured so that
+ * VF interrupts can be handled. Offset equal to zero means
+ * that PF vectors are not configured and would overlap AF vectors.
+ */
+ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+ offset;
+}
+
static int rvu_register_interrupts(struct rvu *rvu)
{
- int ret;
+ int ret, offset, pf_vec_start;
rvu->num_vec = pci_msix_vec_count(rvu->pdev);
@@ -1620,13 +2042,323 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
+ /* Register FLR interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ "RVUAF FLR");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for FLR\n");
+ goto fail;
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+ /* Enable FLR interrupt for all PFs*/
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+ /* Enable ME interrupt for all PFs*/
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu))
+ return 0;
+
+ /* Get PF MSIX vectors offset. */
+ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register FLR interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
return 0;
fail:
- pci_free_irq_vectors(rvu->pdev);
+ rvu_unregister_interrupts(rvu);
+ return ret;
+}
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->flr_wq) {
+ flush_workqueue(rvu->flr_wq);
+ destroy_workqueue(rvu->flr_wq);
+ rvu->flr_wq = NULL;
+ }
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+ int dev, num_devs;
+ u64 cfg;
+ int pf;
+
+ /* Enable FLR for all PFs*/
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+ cfg | BIT_ULL(22));
+ }
+
+ rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 1);
+ if (!rvu->flr_wq)
+ return -ENOMEM;
+
+ num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+ rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->flr_wrk) {
+ destroy_workqueue(rvu->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (dev = 0; dev < num_devs; dev++) {
+ rvu->flr_wrk[dev].rvu = rvu;
+ INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+ }
+
+ mutex_init(&rvu->flr_lock);
+
+ return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ /* Mbox */
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int ret = -EIO;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+ NULL);
+ if (!pdev)
+ goto err;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ /* Read number of available LBK channels from LBK(0)_CONST register. */
+ ret = (readq(base + 0x10) >> 32) & 0xffff;
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+err:
return ret;
}
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ int err, chans, vfs;
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+ dev_warn(&pdev->dev,
+ "Skipping SRIOV enablement since not enough IRQs are available\n");
+ return 0;
+ }
+
+ chans = lbk_get_num_chans();
+ if (chans < 0)
+ return chans;
+
+ vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Limit VFs in case we have more VFs than LBK channels available. */
+ if (vfs > chans)
+ vfs = chans;
+
+ /* AF's VFs work in pairs and talk over consecutive loopback channels.
+ * Thus we want to enable maximum even number of VFs. In case
+ * odd number of VFs are available then the last VF on the list
+ * remains disabled.
+ */
+ if (vfs & 0x1) {
+ dev_warn(&pdev->dev,
+ "Number of VFs should be even. Enabling %d out of %d.\n",
+ vfs - 1, vfs);
+ vfs--;
+ }
+
+ if (!vfs)
+ return 0;
+
+ /* Save the number of VFs for reference in the VF interrupt handlers.
+ * Since interrupts might start arriving during SRIOV enablement,
+ * the ordinary API cannot be used to get the number of enabled VFs.
+ */
+ rvu->vfs = vfs;
+
+ err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+ rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+ if (err)
+ return err;
+
+ rvu_enable_afvf_intr(rvu);
+ /* Make sure IRQs are enabled before SRIOV. */
+ mb();
+
+ err = pci_enable_sriov(pdev, vfs);
+ if (err) {
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ return err;
+ }
+
+ return 0;
+}
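
A standalone sketch (not part of this patch) of the VF pairing rule described above: AF VFs come in pairs that talk over consecutive loopback channels, so an odd VF count is rounded down, and VF 2k transmits on the channel VF 2k+1 receives on (and vice versa), mirroring the assignment used later in nix_interface_init(). The raw channel numbers below are illustrative only.

#include <stdio.h>

int main(void)
{
    int chans = 8, totalvfs = 7, vfs, vf;

    vfs = totalvfs > chans ? chans : totalvfs;  /* cap VFs at LBK channel count */
    if (vfs & 0x1)                              /* pairs only: drop the odd VF  */
        vfs--;

    for (vf = 0; vf < vfs; vf++) {
        int rx = vf;                             /* this VF's RX channel        */
        int tx = (vf & 0x1) ? vf - 1 : vf + 1;   /* its pair partner's channel  */
        printf("VF%d: rx chan %d, tx chan %d\n", vf, rx, tx);
    }
    return 0;
}
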
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ pci_disable_sriov(rvu->pdev);
+}
+
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -1689,24 +2421,35 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_release_regions;
- err = rvu_mbox_init(rvu);
+ /* Init mailbox between AF and PFs */
+ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+ rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+ rvu_afpf_mbox_up_handler);
if (err)
goto err_hwsetup;
- err = rvu_cgx_probe(rvu);
+ err = rvu_flr_init(rvu);
if (err)
goto err_mbox;
err = rvu_register_interrupts(rvu);
if (err)
- goto err_cgx;
+ goto err_flr;
+
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err)
+ goto err_irq;
return 0;
-err_cgx:
- rvu_cgx_wq_destroy(rvu);
+err_irq:
+ rvu_unregister_interrupts(rvu);
+err_flr:
+ rvu_flr_wq_destroy(rvu);
err_mbox:
- rvu_mbox_destroy(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
+ rvu_cgx_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
err_release_regions:
@@ -1725,8 +2468,10 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_unregister_interrupts(rvu);
- rvu_cgx_wq_destroy(rvu);
- rvu_mbox_destroy(rvu);
+ rvu_flr_wq_destroy(rvu);
+ rvu_cgx_exit(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+ rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 2c0580cd2807..ae8e2e206c87 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -11,6 +11,7 @@
#ifndef RVU_H
#define RVU_H
+#include <linux/pci.h>
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
@@ -18,6 +19,9 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
#define PCI_PF_REG_BAR_NUM 2
@@ -64,7 +68,7 @@ struct nix_mcast {
struct qmem *mcast_buf;
int replay_pkind;
int next_free_mce;
- spinlock_t mce_lock; /* Serialize MCE updates */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -74,15 +78,27 @@ struct nix_mce_list {
};
struct npc_mcam {
- spinlock_t lock; /* MCAM entries and counters update lock */
+ struct rsrc_bmap counters;
+ struct mutex lock; /* MCAM entries and counters update lock */
+ unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+ unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+ u16 bmap_entries; /* Number of unreserved MCAM entries */
+ u16 bmap_fcnt; /* MCAM entries free count */
+ u16 *entry2pfvf_map;
+ u16 *entry2cntr_map;
+ u16 *cntr2pfvf_map;
+ u16 *cntr_refcnt;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
u16 banksize; /* Number of MCAM entries in each bank */
u16 total_entries; /* Total number of MCAM entries */
- u16 entries; /* Total minus reserved for NIX LFs */
u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+ u16 lprio_count;
+ u16 lprio_start;
+ u16 hprio_count;
+ u16 hprio_end;
};
/* Structure for per RVU func info ie PF/VF */
@@ -122,12 +138,19 @@ struct rvu_pfvf {
u16 tx_chan_base;
u8 rx_chan_cnt; /* total number of RX channels */
u8 tx_chan_cnt; /* total number of TX channels */
+ u16 maxlen;
+ u16 minlen;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
+
+ /* VLAN offload */
+ struct mcam_entry entry;
+ int rxvlan_index;
+ bool rxvlan;
};
struct nix_txsch {
@@ -164,6 +187,16 @@ struct rvu_hwinfo {
struct npc_mcam mcam;
};
+struct mbox_wq_info {
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+
+ struct workqueue_struct *mbox_wq;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -172,14 +205,17 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
- spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ int vfs; /* Number of VFs attached to RVU */
/* Mbox */
- struct otx2_mbox mbox;
- struct rvu_work *mbox_wrk;
- struct otx2_mbox mbox_up;
- struct rvu_work *mbox_wrk_up;
- struct workqueue_struct *mbox_wq;
+ struct mbox_wq_info afpf_wq_info;
+ struct mbox_wq_info afvf_wq_info;
+
+ /* PF FLR */
+ struct rvu_work *flr_wrk;
+ struct workqueue_struct *flr_wq;
+ struct mutex flr_lock; /* Serialize FLRs */
/* MSI-X */
u16 num_vec;
@@ -190,7 +226,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
u8 cgx_mapped_pfs;
- u8 cgx_cnt; /* available cgx ports */
+ u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
@@ -223,9 +259,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
/* Function Prototypes
* RVU
*/
+static inline int is_afvf(u16 pcifunc)
+{
+ return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
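
A standalone sketch (not part of this patch) of the is_afvf() test above: pcifunc packs the PF number in the upper bits and the function number in the low bits, so any function with no PF bits set belongs to the AF (PF0), i.e. is an AF VF. The 10-bit FUNC field used here is an assumption for illustration; the real field widths come from RVU_PFVF_* definitions not shown in this hunk.

#include <stdio.h>
#include <stdint.h>

#define FUNC_MASK 0x3FF   /* assumed width of the FUNC field */

static int is_afvf_sketch(uint16_t pcifunc)
{
    return !(pcifunc & ~FUNC_MASK);   /* no PF bits set => belongs to PF0 (AF) */
}

int main(void)
{
    uint16_t afvf2  = 0x0002;          /* PF0, FUNC 2 -> AF VF        */
    uint16_t pf3vf1 = (3 << 10) | 1;   /* PF3, FUNC 1 -> not an AF VF */

    printf("0x%04x: %d, 0x%04x: %d\n",
           afvf2, is_afvf_sketch(afvf2), pf3vf1, is_afvf_sketch(pf3vf1));
    return 0;
}
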
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -236,6 +285,7 @@ int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
@@ -266,89 +316,100 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -360,9 +421,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp);
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 188185c15b4a..7d7133c5f799 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,14 +20,14 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
-*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
- &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
- if (cgx_id >= rvu->cgx_cnt)
+ if (cgx_id >= rvu->cgx_cnt_max)
return NULL;
return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
- int cgx_cnt = rvu->cgx_cnt;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
int size, free_pkind;
- if (!cgx_cnt)
+ if (!cgx_cnt_max)
return 0;
- if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
- rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
- /* Initialize offset 0 with an invalid cgx and lmac id */
- rvu->pf2cgxlmac_map[0] = 0xFF;
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
rvu->cgx_mapped_pfs = 0;
- for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
}
/* Send mbox message to PF */
- msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
} while (1);
}
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
struct cgx_event_cb cb;
int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
- return;
+ return -ENOMEM;
}
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
cb.data = rvu;
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
cgx, lmac);
}
}
+
+ return 0;
}
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
}
}
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
{
- int i, err;
+ int cgx, err;
+ void *cgxd;
- /* find available cgx ports */
- rvu->cgx_cnt = cgx_get_cgx_cnt();
- if (!rvu->cgx_cnt) {
+ /* CGX port IDs start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port ID value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
}
- rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
- GFP_KERNEL);
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
if (!rvu->cgx_idmap)
return -ENOMEM;
/* Initialize the cgxdata table */
- for (i = 0; i < rvu->cgx_cnt; i++)
- rvu->cgx_idmap[i] = cgx_get_pdata(i);
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
/* Map CGX LMAC interfaces to RVU PFs */
err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
return err;
/* Register for CGX events */
- cgx_lmac_event_handler_init(rvu);
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+ /* Ensure event handler registration is completed before
+ * we turn on the links
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregister is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
return 0;
}
@@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
return 0;
}
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
@@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
lmac_id, en);
}
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5ab7eff2301..962a82f7d141 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -55,6 +55,17 @@ struct mce {
u16 pcifunc;
};
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return false;
+ return true;
+}
+
int rvu_get_nixlf_count(struct rvu *rvu)
{
struct rvu_block *block;
@@ -94,6 +105,23 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL;
}
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ /* Sync all in-flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "NIX RX software sync failed\n");
+
+ /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
+ * bit too early. Hence wait for 50us more.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ usleep_range(50, 60);
+}
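
A standalone sketch (not part of this patch) of the poll pattern used by nix_rx_sync() above: set the ENA bit, then poll until hardware clears it, with a bounded number of retries. The simulated register below simply counts down; rvu_poll_reg()'s real implementation is not part of this hunk.

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_reg = 1;   /* bit 0 plays the role of SW_SYNC[ENA] */

static uint64_t read_reg(void)
{
    static int countdown = 3;
    if (countdown-- == 0)
        fake_reg &= ~1ULL;       /* "hardware" finishes the sync */
    return fake_reg;
}

static int poll_reg_clear(uint64_t mask, int retries)
{
    while (retries--) {
        if (!(read_reg() & mask))
            return 0;            /* bit cleared: sync complete */
    }
    return -1;                   /* timed out */
}

int main(void)
{
    printf("sync %s\n", poll_reg_clear(1ULL, 10) ? "timed out" : "done");
    return 0;
}
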
+
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
@@ -109,12 +137,12 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
if (schq >= txsch->schq.max)
return false;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
if (txsch->pfvf_map[schq] != pcifunc) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return false;
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return true;
}
@@ -122,7 +150,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
u8 cgx_id, lmac_id;
- int pkind, pf;
+ int pkind, pf, vf;
int err;
pf = rvu_get_pf(pcifunc);
@@ -148,6 +176,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_set_pkind(rvu, pkind, pfvf);
break;
case NIX_INTF_TYPE_LBK:
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
+ pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+ NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, false);
break;
}
@@ -168,14 +204,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+ pfvf->maxlen = NIC_HW_MIN_FRS;
+ pfvf->minlen = NIC_HW_MIN_FRS;
return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int err;
+ pfvf->maxlen = 0;
+ pfvf->minlen = 0;
+ pfvf->rxvlan = false;
+
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
if (err) {
@@ -637,25 +680,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return nix_lf_hwctx_disable(rvu, req);
}
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp)
{
- int nixlf, qints, hwctx_size, err, rc = 0;
+ int nixlf, qints, hwctx_size, intf, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
@@ -676,6 +719,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+ if (req->npa_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+ return NIX_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+ if (req->sso_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+ return NIX_AF_INVAL_SSO_PF_FUNC;
+ }
+
/* If RSS is being enabled, check if requested config is valid.
* RSS table size should be power of two, otherwise
* RSS_GRP::OFFSET + adder might go beyond that group or
@@ -780,18 +841,10 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
- /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
- * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
- * PCIFUNC itself.
- */
- if (req->npa_func == RVU_DEFAULT_PF_FUNC)
- cfg = pcifunc;
- else
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+ if (req->npa_func)
cfg = req->npa_func;
-
- if (req->sso_func == RVU_DEFAULT_PF_FUNC)
- cfg |= (u64)pcifunc << 16;
- else
+ if (req->sso_func)
cfg |= (u64)req->sso_func << 16;
cfg |= (u64)req->xqe_sz << 33;
@@ -800,10 +853,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
- err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
goto free_mem;
+ /* Disable NPC entries as NIXLF's contexts are not initialized yet */
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
goto exit;
free_mem:
@@ -823,10 +880,18 @@ exit:
rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ /* Get HW supported stat count */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+ rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+ /* Get count of CQ IRQs and error IRQs supported per LF */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ rsp->qints = ((cfg >> 12) & 0xFFF);
+ rsp->cints = ((cfg >> 24) & 0xFFF);
return rc;
}
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -918,7 +983,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -939,7 +1004,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
@@ -995,7 +1060,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return rc;
}
@@ -1020,7 +1085,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return NIX_AF_ERR_AF_LF_INVALID;
/* Disable TL2/3 queue links before SMQ flush*/
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
continue;
@@ -1062,7 +1127,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
txsch->pfvf_map[schq] = 0;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1073,7 +1138,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return 0;
}
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp)
{
@@ -1118,7 +1183,7 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
@@ -1181,35 +1246,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
struct nix_vtag_config *req)
{
- u64 regval = 0;
-
-#define NIX_VTAGTYPE_MAX 0x8ull
-#define NIX_VTAGSIZE_MASK 0x7ull
-#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+ u64 regval = req->vtag_size;
- if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
- req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
- regval = rvu_read64(rvu, blkaddr,
- NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
-
- if (req->rx.strip_vtag && req->rx.capture_vtag)
- regval |= BIT_ULL(4) | BIT_ULL(5);
- else if (req->rx.strip_vtag)
+ if (req->rx.capture_vtag)
+ regval |= BIT_ULL(5);
+ if (req->rx.strip_vtag)
regval |= BIT_ULL(4);
- else
- regval &= ~(BIT_ULL(4) | BIT_ULL(5));
-
- regval &= ~NIX_VTAGSIZE_MASK;
- regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
rvu_write64(rvu, blkaddr,
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
return 0;
}
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
@@ -1294,7 +1346,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
/* Add a new one to the list, at the tail */
- mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
mce->idx = idx;
@@ -1317,6 +1369,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
struct rvu_pfvf *pfvf;
int blkaddr;
+ /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
+ if (is_afvf(pcifunc))
+ return 0;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1340,7 +1396,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
- spin_lock(&mcast->mce_lock);
+ mutex_lock(&mcast->mce_lock);
err = nix_update_mce_list(mce_list, pcifunc, idx, add);
if (err)
@@ -1370,7 +1426,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
}
end:
- spin_unlock(&mcast->mce_lock);
+ mutex_unlock(&mcast->mce_lock);
return err;
}
@@ -1455,7 +1511,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
BIT_ULL(63) | (mcast->replay_pkind << 24) |
BIT_ULL(20) | MC_BUF_CNT);
- spin_lock_init(&mcast->mce_lock);
+ mutex_init(&mcast->mce_lock);
return nix_setup_bcast_tables(rvu, nix_hw);
}
@@ -1506,7 +1562,7 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
return 0;
}
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1564,7 +1620,7 @@ static int get_flowkey_alg_idx(u32 flow_cfg)
return FLOW_KEY_ALG_PORT;
}
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct msg_rsp *rsp)
{
@@ -1720,7 +1776,7 @@ static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
}
}
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
@@ -1742,10 +1798,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
return 0;
}
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
@@ -1775,9 +1834,261 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
return 0;
}
+static void nix_find_link_frs(struct rvu *rvu,
+ struct nix_frs_cfg *req, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_pfvf *pfvf;
+ int maxlen, minlen;
+ int numvfs, hwvf;
+ int vf;
+
+ /* Update with requester's min/max lengths */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ pfvf->maxlen = req->maxlen;
+ if (req->update_minlen)
+ pfvf->minlen = req->minlen;
+
+ maxlen = req->maxlen;
+ minlen = req->update_minlen ? req->minlen : 0;
+
+ /* Get this PF's numVFs and starting hwvf */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ /* For each VF, compare requested max/minlen */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+ }
+
+ /* Compare requested max/minlen with PF's max/minlen */
+ pfvf = &rvu->pf[pf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+
+ /* Update the request with the PF's and its VFs' max/min lengths */
+ req->maxlen = maxlen;
+ if (req->update_minlen)
+ req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ int blkaddr, schq, link = -1;
+ struct nix_txsch *txsch;
+ u64 cfg, lmac_fifo_len;
+ struct nix_hw *nix_hw;
+ u8 cgx = 0, lmac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ /* Check if the requester wants to update the SMQs */
+ if (!req->update_smq)
+ goto rx_frscfg;
+
+ /* Update min/maxlen in each of the SMQs attached to this PF/VF */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ mutex_lock(&rvu->rsrc_lock);
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+ if (req->update_minlen)
+ cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+rx_frscfg:
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+ link = hw->cgx_links + hw->lbk_links;
+ goto linkcfg;
+ }
+
+ /* Check if the request is from CGX mapped RVU PF */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ /* Get CGX and LMAC to which this PF is mapped and find link */
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ link = (cgx * hw->lmac_per_cgx) + lmac;
+ } else if (pf == 0) {
+ /* For VFs of PF0 ingress is LBK port, so config LBK link */
+ link = hw->cgx_links;
+ }
+
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+ nix_find_link_frs(rvu, req, pcifunc);
+
+linkcfg:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+ if (req->sdp_link || pf == 0)
+ return 0;
+
+ /* Update transmit credits for CGX links */
+ lmac_fifo_len =
+ CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ cfg &= ~(0xFFFFFULL << 12);
+ cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
+
+ return 0;
+}
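
A standalone sketch (not part of this patch) of the read-modify-write used above to update a link's frame sizes: maxlen lives in bits [31:16] and minlen in bits [15:0] of the (simulated) RX link config register, matching the masks and shifts in the handler. The 9212/40 example values are illustrative max/min frame sizes, not definitions from this hunk.

#include <stdio.h>
#include <stdint.h>

static uint64_t set_link_frs(uint64_t cfg, uint16_t maxlen, uint16_t minlen,
                             int update_minlen)
{
    cfg = (cfg & ~(0xFFFFULL << 16)) | ((uint64_t)maxlen << 16);
    if (update_minlen)
        cfg = (cfg & ~0xFFFFULL) | minlen;
    return cfg;
}

int main(void)
{
    uint64_t cfg = (0x5AULL << 40) | 0x12345678;   /* arbitrary starting value */

    cfg = set_link_frs(cfg, 9212, 40, 1);
    printf("cfg=0x%016llx maxlen=%llu minlen=%llu\n",
           (unsigned long long)cfg,
           (unsigned long long)((cfg >> 16) & 0xFFFF),
           (unsigned long long)(cfg & 0xFFFF));
    return 0;
}
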
+
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
+ struct npc_mcam_free_entry_req free_req = { };
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ /* LBK VFs do not have a separate MCAM UCAST entry, hence
+ * skip allocating rxvlan for them
+ */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->rxvlan)
+ return 0;
+
+ /* alloc new mcam entry */
+ alloc_req.hdr.pcifunc = pcifunc;
+ alloc_req.count = 1;
+
+ err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (err)
+ return err;
+
+ /* update entry to enable rxvlan offload */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ pfvf->rxvlan_index = alloc_rsp.entry_list[0];
+ /* all it means is that rxvlan_index is valid */
+ pfvf->rxvlan = true;
+
+ err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ if (err)
+ goto free_entry;
+
+ return 0;
+free_entry:
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.entry = alloc_rsp.entry_list[0];
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
+ pfvf->rxvlan = false;
+ return err;
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int cgx, lmac_cnt, slink, link;
+ u64 tx_credits;
+
+ /* Set default min/max packet lengths allowed on NIX Rx links.
+ *
+ * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
+ * as undersize and report them to SW as error pkts; hence set it
+ * to 40 bytes.
+ */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ if (hw->sdp_links) {
+ link = hw->cgx_links + hw->lbk_links;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+ for (cgx = 0; cgx < hw->cgx; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ slink = cgx * hw->lmac_per_cgx;
+ for (link = slink; link < (slink + lmac_cnt); link++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link),
+ tx_credits);
+ }
+ }
+
+ /* Set Tx credits for LBK link */
+ slink = hw->cgx_links;
+ for (link = slink; link < (slink + hw->lbk_links); link++) {
+ tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
+ }
+}
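
A standalone sketch (not part of this patch) of the Tx credit value programmed by nix_link_config() above: the per-LMAC FIFO length minus the max frame size, in 16-byte units, packed into the upper bits with the credit packet count and enable bit in the low bits. The CGX_FIFO_LEN and NIC_HW_MAX_FRS values below are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

#define CGX_FIFO_LEN   65536   /* assumed per-CGX FIFO size */
#define NIC_HW_MAX_FRS 9212    /* assumed max frame size    */

static uint64_t link_tx_credits(int lmac_cnt)
{
    uint64_t credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;

    /* Enable credits (bit 1) and set credit pkt count to max (0x1FF). */
    return (credits << 12) | (0x1FF << 2) | (1ULL << 1);
}

int main(void)
{
    printf("1 LMAC:  0x%016llx\n", (unsigned long long)link_tx_credits(1));
    printf("4 LMACs: 0x%016llx\n", (unsigned long long)link_tx_credits(4));
    return 0;
}
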
+
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
int idx, err;
@@ -1796,8 +2107,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
/* Check if CGX devices are ready */
- for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
- if (status & (BIT_ULL(16 + idx)))
+ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+ /* Skip when cgx port is not available */
+ if (!rvu_cgx_pdata(idx, rvu) ||
+ (status & (BIT_ULL(16 + idx))))
continue;
dev_err(rvu->dev,
"CGX%d didn't respond to NIX X2P calibration\n", idx);
@@ -1830,10 +2143,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Set admin queue endianness */
cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
- cfg |= BIT_ULL(1);
+ cfg |= BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
- cfg &= ~BIT_ULL(1);
+ cfg &= ~BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif
@@ -1870,6 +2183,14 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
+ /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -1922,6 +2243,9 @@ int rvu_nix_init(struct rvu *rvu)
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+
+ /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+ nix_link_config(rvu, blkaddr);
}
return 0;
}
@@ -1955,5 +2279,88 @@ void rvu_nix_freemem(struct rvu *rvu)
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
+ mutex_destroy(&mcast->mce_lock);
+ }
+}
+
+static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+ int err;
+
+ ctx_req.hdr.pcifunc = pcifunc;
+
+ /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+ nix_rx_sync(rvu, blkaddr);
+ nix_txschq_free(rvu, pcifunc);
+
+ if (pfvf->sq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "SQ ctx disable failed\n");
+ }
+
+ if (pfvf->rq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "RQ ctx disable failed\n");
+ }
+
+ if (pfvf->cq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+
+ nix_ctx_free(rvu, pfvf);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 7531fdc54fa1..c0e165dfc403 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
@@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
pfvf->npa_qints_ctx = NULL;
}
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp)
{
@@ -372,7 +372,7 @@ exit:
return rc;
}
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu)
block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+
+ /* Disable all pools */
+ ctx_req.hdr.pcifunc = pcifunc;
+ ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ /* Disable all auras */
+ ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ npa_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 23ff47f7efc5..6ea2b0e2df42 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -26,13 +27,10 @@
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
-struct mcam_entry {
-#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
- u64 kw[NPC_MAX_KWS_IN_KEY];
- u64 kw_mask[NPC_MAX_KWS_IN_KEY];
- u64 action;
- u64 vtag_action;
-};
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
@@ -256,6 +254,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
+{
+ int dbank = npc_get_bank(mcam, dest);
+ int sbank = npc_get_bank(mcam, src);
+ u64 cfg, sreg, dreg;
+ int bank, i;
+
+ src &= (mcam->banksize - 1);
+ dest &= (mcam->banksize - 1);
+
+ /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+ dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+ for (i = 0; i < 6; i++) {
+ cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+ rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+ }
+ }
+
+ /* Copy action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+ /* Copy TAG action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+ /* Enable or disable */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
{
@@ -269,12 +307,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
struct nix_rx_action action;
int blkaddr, index, kwi;
u64 mac = 0;
+ /* AF's VFs work in promiscuous mode */
+ if (is_afvf(pcifunc))
+ return;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
@@ -308,6 +351,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
+
+ /* Add VLAN matching, set up the action and save the entry for later */
+ entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+ entry.vtag_action = VTAG0_VALID_BIT |
+ FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+ FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+ memcpy(&pfvf->entry, &entry, sizeof(entry));
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -315,15 +369,15 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
- struct nix_rx_action action;
+ struct nix_rx_action action = { };
int blkaddr, index, kwi;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
+ /* Only PF or AF VF can add a promiscuous entry */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
return;
- /* Only PF or AF VF can add a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
@@ -347,7 +401,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
NIX_INTF_RX, &entry, true);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -362,7 +417,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
}
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -390,9 +455,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel */
- entry.kw[0] = BIT_ULL(25) | chan;
- entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+ /* Check for L2B bit and LMAC channel.
+ * NOTE: The MKEX default profile is a reduced version (intended to
+ * accommodate more capability while ignoring a few bits), used as a
+ * stop-gap approach.
+ * We care about L2B, which per the HRM NPC_PARSE_KEX_S sits at
+ * BIT_POS[25]; it is moved to BIT_POS[13], ignoring ERRCODE and
+ * ERRLEV, as otherwise we would lose out on capability features
+ * needed for CoS (from an ODP PoV), e.g. VLAN, DSCP.
+ *
+ * Reduced layout of the MKEX default profile
+ * (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
+ *
+ * BIT_POS[31:28] : LD
+ * BIT_POS[27:24] : LC
+ * BIT_POS[23:20] : LB
+ * BIT_POS[19:16] : LA
+ * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+ * BIT_POS[11:00] : CHAN
+ */
+ entry.kw[0] = BIT_ULL(13) | chan;
+ entry.kw_mask[0] = ~entry.kw[0] & (BIT_ULL(13) | 0xFFFULL);
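As a quick sketch of the reduced layout described above (the channel number is purely illustrative, not a hardware value), the broadcast match key packs the L2B flag and the LMAC channel like this:

    /* illustrative only: assume the LMAC channel is 0x123 */
    u64 chan = 0x123;
    u64 kw0  = BIT_ULL(13) | chan;  /* bit 13 = L2B, bits [11:0] = CHAN */
    /* kw0 == 0x2123: match layer-2 broadcast frames from channel 0x123 */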
*(u64 *)&action = 0x00;
#ifdef MCAST_MCE
@@ -454,51 +538,95 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
-void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
- int blkaddr, index, bank;
+ int index, bank, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Disable ucast MCAM match entry of this PF/VF */
+ /* Ucast MCAM match entry of this PF/VF */
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, disable promisc and bcast MCAM match entries */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_BCAST_ENTRY);
- /* For bcast, disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF's nixlf is removed from bcast replication
- * list.
- */
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
- if (action.op != NIX_RX_ACTIONOP_MCAST)
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ /* For PF, ena/dis promisc and bcast MCAM match entries */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+ /* For bcast, enable/disable only if its action is not
+ * packet replication; in case the action is replication,
+ * this PF's nixlf is instead removed from the bcast
+ * replication list.
+ */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam,
+ blkaddr, index, enable);
+ if (enable)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
+ else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- }
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
-#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+ /* Free all MCAM counters mapped to this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+ mutex_unlock(&mcam->lock);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
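For reference, a sketch of how one of the KEX_LD_CFG() values used below decodes; this is the DMAC extractor configured a few lines further down:

    /* Extract 6 bytes (bytesm1 = 0x05) from header offset 0, entry enabled,
     * flags disabled, placed at key offset NPC_PARSE_RESULT_DMAC_OFFSET:
     */
    cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
    /* == (0x05 << 16) | (0x1 << 7) | NPC_PARSE_RESULT_DMAC_OFFSET
     * == 0x50080 | NPC_PARSE_RESULT_DMAC_OFFSET
     */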
+
static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -514,28 +642,66 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
*/
for (lid = 0; lid < lid_count; lid++) {
for (ltype = 0; ltype < 16; ltype++) {
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
}
}
- /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts
- * then 112bit key is not sufficient
- */
if (mcam->keysize != NPC_MCAM_KEY_X2)
return;
- /* Start placing extracted data/flags from 64bit onwards, for now */
- /* Extract DMAC from the packet */
- cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+ /* Default MCAM KEX profile */
+ /* Layer A: Ethernet: */
+
+ /* DMAC: 6 bytes, KW1[47:0] */
+ cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
+
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+
+ /* Layer C: IPv4 */
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
+ /* TOS: 1 byte, KW1[63:56] */
+ cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
+
+ /* Layer D: UDP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
+
+ /* Layer D: TCP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -690,13 +856,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int rsvd;
+ int rsvd, err;
u64 cfg;
/* Get HW limits */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
mcam->banks = (cfg >> 44) & 0xF;
mcam->banksize = (cfg >> 28) & 0xFFFF;
+ mcam->counters.max = (cfg >> 48) & 0xFFFF;
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
@@ -728,20 +895,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return -ENOMEM;
}
- mcam->entries = mcam->total_entries - rsvd;
- mcam->nixlf_offset = mcam->entries;
+ mcam->bmap_entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->bmap_entries;
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
- spin_lock_init(&mcam->lock);
+ /* Allocate bitmaps for managing MCAM entries */
+ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap)
+ return -ENOMEM;
+
+ mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+ BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap_reverse)
+ return -ENOMEM;
+
+ mcam->bmap_fcnt = mcam->bmap_entries;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2pfvf_map)
+ return -ENOMEM;
+
+ /* Reserve 1/8th of MCAM entries at the bottom for low priority
+ * allocations and another 1/8th at the top for high priority
+ * allocations.
+ */
+ mcam->lprio_count = mcam->bmap_entries / 8;
+ if (mcam->lprio_count > BITS_PER_LONG)
+ mcam->lprio_count = round_down(mcam->lprio_count,
+ BITS_PER_LONG);
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+ mcam->hprio_count = mcam->lprio_count;
+ mcam->hprio_end = mcam->hprio_count;
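As a numeric sketch of the zone sizing above (the entry count is an assumed example, not a hardware value):

    /* Assume mcam->bmap_entries == 4096 for illustration */
    lprio_count = 4096 / 8;      /* = 512, already a multiple of BITS_PER_LONG      */
    lprio_start = 4096 - 512;    /* = 3584: entries 3584..4095 form the low-prio zone */
    hprio_count = 512;
    hprio_end   = 512;           /* entries 0..511 form the high-prio zone            */
                                 /* entries 512..3583 are the middle zone             */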
+
+ /* Allocate bitmap for managing MCAM counters and memory
+ * for saving counter to RVU PFFUNC allocation mapping.
+ */
+ err = rvu_alloc_bitmap(&mcam->counters);
+ if (err)
+ return err;
+
+ mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr2pfvf_map)
+ goto free_mem;
+
+ /* Alloc memory for MCAM entry to counter mapping and for tracking
+ * counter's reference count.
+ */
+ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2cntr_map)
+ goto free_mem;
+
+ mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr_refcnt)
+ goto free_mem;
+
+ mutex_init(&mcam->lock);
return 0;
+
+free_mem:
+ kfree(mcam->counters.bmap);
+ return -ENOMEM;
}
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
u64 keyz = NPC_MCAM_KEY_X2;
- int blkaddr, err;
+ int blkaddr, entry, bank, err;
+ u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -749,6 +978,14 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ /* First disable all MCAM entries, to stop traffic towards NIXLFs */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
+ for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+ }
+
/* Allocate resource bimap for pkind*/
pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
NPC_AF_CONST1) >> 12) & 0xFF;
@@ -780,13 +1017,18 @@ int rvu_npc_init(struct rvu *rvu)
BIT_ULL(6) | BIT_ULL(2));
/* Set RX and TX side MCAM search key size.
- * Also enable parse key extract nibbles suchthat except
- * layer E to H, rest of the key is included for MCAM search.
+ * LA..LD (ltype only) + Channel
*/
+ nibble_ena = 0x49247;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
+ /* Due to an erratum (35786) in A0 pass silicon, the parse nibble
+ * enable configuration has to be identical for both Rx and Tx
+ * interfaces.
+ */
+ if (!is_rvu_9xxx_A0(rvu))
+ nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
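A sketch of what the RX nibble-enable value selects; the bit positions below are plain arithmetic on the constant, and the field mapping is only what the comment above states:

    /* 0x49247 == 0b0100_1001_0010_0100_0111, i.e. bits 0, 1, 2, 6, 9, 12, 15, 18.
     * Each set bit pulls one 4-bit nibble of the NPC parse result into the
     * MCAM search key; per the comment above, this keeps the 12-bit channel
     * (the three lowest nibbles) and the LA..LD layer-type nibbles and drops
     * the rest of the parse key.
     */
    nibble_ena = 0x49247;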
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
@@ -811,6 +1053,1019 @@ int rvu_npc_init(struct rvu *rvu)
void rvu_npc_freemem(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
+ kfree(mcam->counters.bmap);
+ mutex_destroy(&mcam->lock);
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+ u16 pcifunc, int entry)
+{
+ /* Verify if entry is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->entry2pfvf_map[entry])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+ u16 pcifunc, int cntr)
+{
+ /* Verify if counter is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (cntr >= mcam->counters.max)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->cntr2pfvf_map[cntr])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Set mapping and increment counter's refcnt */
+ mcam->entry2cntr_map[entry] = cntr;
+ mcam->cntr_refcnt[cntr]++;
+ /* Enable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+ BIT_ULL(9) | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+ struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Remove mapping and reduce counter's refcnt */
+ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr]--;
+ /* Disable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __set_bit(entry, mcam->bmap);
+ __set_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __clear_bit(entry, mcam->bmap);
+ __clear_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt++;
+}
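A small sketch of the forward/reverse index relationship these two helpers keep in sync (the table size is an assumed example):

    /* With an assumed mcam->bmap_entries of 4096:
     *   forward index 0    <->  reverse index 4095
     *   forward index 100  <->  reverse index 3995
     * so scanning bmap_reverse from bit 0 walks the MCAM from its
     * highest-numbered entries downwards.
     */
    rentry = mcam->bmap_entries - index - 1;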
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc)
+{
+ u16 index, cntr;
+
+ /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2pfvf_map[index] == pcifunc) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ /* Free the entry in bitmap */
+ npc_mcam_clear_bit(mcam, index);
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+ }
+ }
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc)
+{
+ u16 cntr;
+
+ /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr] = 0;
+ rvu_free_rsrc(&mcam->counters, cntr);
+ /* This API is expected to be called after freeing
+ * the MCAM entries, which in turn removes the
+ * 'entry to counter' mapping.
+ * No need to do it again here.
+ */
+ }
+ }
+}
+
+/* Find an area of contiguous free entries of size 'nr'.
+ * If none is found, return the start of the largest contiguous free
+ * area available and report its size via 'max_area'.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+ u16 nr, u16 *max_area)
+{
+ u16 max_area_start = 0;
+ u16 index, next, end;
+
+ *max_area = 0;
+
+again:
+ index = find_next_zero_bit(map, size, start);
+ if (index >= size)
+ return max_area_start;
+
+ end = ((index + nr) >= size) ? size : index + nr;
+ next = find_next_bit(map, end, index);
+ if (*max_area < (next - index)) {
+ *max_area = next - index;
+ max_area_start = index;
+ }
+
+ if (next < end) {
+ start = next + 1;
+ goto again;
+ }
+
+ return max_area_start;
+}
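A usage sketch (the bitmap contents are made up) showing how a caller interprets npc_mcam_find_zero_area():

    u16 max_area, start;

    /* Suppose bits 0-1 are set and bits 2-4 are clear. Asking for three
     * free entries returns start == 2 with max_area == 3. If only bits
     * 2-3 had been clear, the call would instead return the start of the
     * largest free run it found, with max_area reporting its length, and
     * the caller decides whether a partial allocation is acceptable.
     */
    start = npc_mcam_find_zero_area(bmap, size, 0, 3, &max_area);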
+
+/* Find the number of free MCAM entries available
+ * within a range, i.e. between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+ u16 index, next;
+ u16 fcnt = 0;
+
+again:
+ if (start >= end)
+ return fcnt;
+
+ index = find_next_zero_bit(map, end, start);
+ if (index >= end)
+ return fcnt;
+
+ next = find_next_bit(map, end, index);
+ if (next <= end) {
+ fcnt += next - index;
+ start = next + 1;
+ goto again;
+ }
+
+ fcnt += end - index;
+ return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+ struct npc_mcam_alloc_entry_req *req,
+ u16 *start, u16 *end, bool *reverse)
+{
+ u16 fcnt;
+
+ if (req->priority == NPC_MCAM_HIGHER_PRIO)
+ goto hprio;
+
+ /* For a low priority entry allocation
+ * - If the reference entry is not in the hprio zone then
+ * search range: ref_entry to end.
+ * - If the reference entry is in the hprio zone and the
+ * request can be accommodated in the non-hprio zone then
+ * search range: 'start of middle zone' to 'end'
+ * - else search in reverse, so that fewer hprio
+ * zone entries are allocated.
+ */
+
+ *reverse = false;
+ *start = req->ref_entry + 1;
+ *end = mcam->bmap_entries;
+
+ if (req->ref_entry >= mcam->hprio_end)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->bmap_entries);
+ if (fcnt > req->count)
+ *start = mcam->hprio_end;
+ else
+ *reverse = true;
+ return;
+
+hprio:
+ /* For a high priority entry allocation, search is always
+ * in reverse to preserve hprio zone entries.
+ * - If the reference entry is not in the lprio zone then
+ * search range: 0 to ref_entry.
+ * - If the reference entry is in the lprio zone and the
+ * request can be accommodated in the middle zone then
+ * search range: 'hprio_end' to 'lprio_start'
+ */
+
+ *reverse = true;
+ *start = 0;
+ *end = req->ref_entry;
+
+ if (req->ref_entry <= mcam->lprio_start)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->lprio_start);
+ if (fcnt < req->count)
+ return;
+ *start = mcam->hprio_end;
+ *end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+ u16 fcnt, hp_fcnt, lp_fcnt;
+ u16 start, end, index;
+ int entry, next_start;
+ bool reverse = false;
+ unsigned long *bmap;
+ u16 max_contig;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if there are any free entries */
+ if (!mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ /* MCAM entries are divided into high priority, middle and
+ * low priority zones. The idea is to avoid allocating the top-most
+ * and bottom-most entries as far as possible, to increase the
+ * probability of honouring priority allocation requests.
+ *
+ * Two bitmaps are used for MCAM entry management:
+ * mcam->bmap for forward search, i.e. '0 to mcam->bmap_entries'.
+ * mcam->bmap_reverse for reverse search, i.e. 'mcam->bmap_entries to 0'.
+ *
+ * The reverse bitmap is used to allocate entries
+ * - when a higher priority entry is requested
+ * - when fewer free entries are available.
+ * Lower priority ones out of the available free entries are always
+ * chosen when a 'high vs low' question arises.
+ */
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+ npc_get_mcam_search_range_priority(mcam, req,
+ &start, &end, &reverse);
+ goto alloc;
+ }
+
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+ */
+ lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->lprio_start,
+ mcam->bmap_entries);
+ hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+ fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+ /* Check if the request can be accommodated in the middle zone */
+ if (fcnt > req->count) {
+ start = mcam->hprio_end;
+ end = mcam->lprio_start;
+ } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+ /* Expand search zone from half of hprio zone to
+ * half of lprio zone.
+ */
+ start = mcam->hprio_end / 2;
+ end = mcam->bmap_entries - (mcam->lprio_count / 2);
+ reverse = true;
+ } else {
+ /* Not enough free entries, search all entries in reverse,
+ * so that low priority ones will get used up.
+ */
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ }
+
+alloc:
+ if (reverse) {
+ bmap = mcam->bmap_reverse;
+ start = mcam->bmap_entries - start;
+ end = mcam->bmap_entries - end;
+ index = start;
+ start = end;
+ end = index;
+ } else {
+ bmap = mcam->bmap;
+ }
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous entries, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(bmap, end, start,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ if (reverse)
+ rsp->entry = mcam->bmap_entries - index - max_contig;
+ else
+ rsp->entry = index;
+ } else {
+ /* Allocate requested number of non-contiguous entries,
+ * if unsuccessful allocate as many as possible.
+ */
+ rsp->count = 0;
+ next_start = start;
+ for (entry = 0; entry < req->count; entry++) {
+ index = find_next_zero_bit(bmap, end, next_start);
+ if (index >= end)
+ break;
+
+ next_start = start + (index - start) + 1;
+
+ /* Save the entry's index */
+ if (reverse)
+ index = mcam->bmap_entries - index - 1;
+ entry_list[entry] = index;
+ rsp->count++;
+ }
+ }
+
+ /* If allocating the requested number of entries was unsuccessful,
+ * expand the search range to the full bitmap length and retry.
+ */
+ if (!req->priority && (rsp->count < req->count) &&
+ ((end - start) != mcam->bmap_entries)) {
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ goto alloc;
+ }
+
+ /* For priority entry allocation requests, if allocation
+ * failed then expand the search to the max possible range and retry.
+ */
+ if (req->priority && rsp->count < req->count) {
+ if (req->priority == NPC_MCAM_LOWER_PRIO &&
+ (start != (req->ref_entry + 1))) {
+ start = req->ref_entry + 1;
+ end = mcam->bmap_entries;
+ reverse = false;
+ goto alloc;
+ } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+ ((end - start) != req->ref_entry)) {
+ start = 0;
+ end = req->ref_entry;
+ reverse = true;
+ goto alloc;
+ }
+ }
+
+ /* Copy MCAM entry indices into the mbox response entry_list.
+ * The requester always expects indices in ascending order,
+ * so reverse the list if the reverse bitmap was used for allocation.
+ */
+ if (!req->contig && rsp->count) {
+ index = 0;
+ for (entry = rsp->count - 1; entry >= 0; entry--) {
+ if (reverse)
+ rsp->entry_list[index++] = entry_list[entry];
+ else
+ rsp->entry_list[entry] = entry_list[entry];
+ }
+ }
+
+ /* Mark the allocated entries as used and set the entry to PFFUNC mapping */
+ for (entry = 0; entry < rsp->count; entry++) {
+ index = req->contig ?
+ (rsp->entry + entry) : rsp->entry_list[entry];
+ npc_mcam_set_bit(mcam, index);
+ mcam->entry2pfvf_map[index] = pcifunc;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ /* Update available free count in mbox response */
+ rsp->free_count = mcam->bmap_fcnt;
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+ /* Check if ref_entry is within range */
+ if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+ ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated indices needs to be sent to requester,
+ * max number of non-contiguous entries per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Alloc request from PFFUNC with no NIXLF attached should be denied */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_ALLOC_DENIED;
+
+ return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
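For context, a minimal sketch of how this handler is driven; it mirrors what rvu_mbox_handler_npc_mcam_alloc_and_write_entry() does further down, with illustrative values for pcifunc and ref_entry:

    struct npc_mcam_alloc_entry_req req = { 0 };
    struct npc_mcam_alloc_entry_rsp rsp = { 0 };
    int rc;

    req.hdr.pcifunc = pcifunc;           /* requesting PF/VF (assumed in scope) */
    req.contig = false;                  /* non-contiguous entries              */
    req.count = 4;                       /* ask for four entries                */
    req.priority = NPC_MCAM_LOWER_PRIO;  /* place them below ref_entry          */
    req.ref_entry = ref_entry;           /* assumed in scope                    */

    rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &req, &rsp);
    /* On success, rsp.count reports how many entries were granted and
     * rsp.entry_list[] holds their indices in ascending order.
     */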
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc = 0;
+ u16 cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Free request from PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ if (req->all)
+ goto free_all;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ mcam->entry2pfvf_map[req->entry] = 0;
+ npc_mcam_clear_bit(mcam, req->entry);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[req->entry];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, cntr);
+
+ goto exit;
+
+free_all:
+ /* Free up all entries allocated to requesting PFFUNC */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ if (req->set_cntr &&
+ npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->set_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+
+ rc = 0;
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 old_entry, new_entry;
+ u16 index, cntr;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < req->shift_count; index++) {
+ old_entry = req->curr_entry[index];
+ new_entry = req->new_entry[index];
+
+ /* Check that both the old and new entries are valid and
+ * belong to this PFFUNC.
+ */
+ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+ if (rc)
+ break;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+ if (rc)
+ break;
+
+ /* new_entry should not have a counter mapped */
+ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+ rc = NPC_MCAM_PERM_DENIED;
+ break;
+ }
+
+ /* Disable the new_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+ /* Copy rule from old entry to new entry */
+ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+ /* Copy counter mapping, if any */
+ cntr = mcam->entry2cntr_map[old_entry];
+ if (cntr != NPC_MCAM_INVALID_MAP) {
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ old_entry, cntr);
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ new_entry, cntr);
+ }
+
+ /* Enable new_entry and disable old_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+ }
+
+ /* If shift has failed then report the failed index */
+ if (index != req->shift_count) {
+ rc = NPC_MCAM_PERM_DENIED;
+ rsp->failed_entry_idx = index;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 max_contig, cntr;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* If the request is from a PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated counter IDs needs to be sent to requester,
+ * max number of non-contiguous counters per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if unused counters are available or not */
+ if (!rvu_rsrc_free_count(&mcam->counters)) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ rsp->count = 0;
+
+ if (req->contig) {
+ /* Allocate the requested number of contiguous counters; if
+ * unsuccessful, find the max contiguous counters available.
+ */
+ index = npc_mcam_find_zero_area(mcam->counters.bmap,
+ mcam->counters.max, 0,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ rsp->cntr = index;
+ for (cntr = index; cntr < (index + max_contig); cntr++) {
+ __set_bit(cntr, mcam->counters.bmap);
+ mcam->cntr2pfvf_map[cntr] = pcifunc;
+ }
+ } else {
+ /* Allocate requested number of non-contiguous counters,
+ * if unsuccessful allocate as many as possible.
+ */
+ for (cntr = 0; cntr < req->count; cntr++) {
+ index = rvu_alloc_rsrc(&mcam->counters);
+ if (index < 0)
+ break;
+ rsp->cntr_list[cntr] = index;
+ rsp->count++;
+ mcam->cntr2pfvf_map[index] = pcifunc;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (err) {
+ mutex_unlock(&mcam->lock);
+ return err;
+ }
+
+ /* Mark counter as free/unused */
+ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+ rvu_free_rsrc(&mcam->counters, req->cntr);
+
+ /* Disable stats for all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (rc)
+ goto exit;
+
+ /* Unmap the MCAM entry and counter */
+ if (!req->all) {
+ rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+ if (rc)
+ goto exit;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+ goto exit;
+ }
+
+ /* Disable stats for all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req;
+ struct npc_mcam_alloc_counter_rsp cntr_rsp;
+ struct npc_mcam_alloc_entry_req entry_req;
+ struct npc_mcam_alloc_entry_rsp entry_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 entry = NPC_MCAM_ENTRY_INVALID;
+ u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Try to allocate a MCAM entry */
+ entry_req.hdr.pcifunc = req->hdr.pcifunc;
+ entry_req.contig = true;
+ entry_req.priority = req->priority;
+ entry_req.ref_entry = req->ref_entry;
+ entry_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+ &entry_req, &entry_rsp);
+ if (rc)
+ return rc;
+
+ if (!entry_rsp.count)
+ return NPC_MCAM_ALLOC_FAILED;
+
+ entry = entry_rsp.entry;
+
+ if (!req->alloc_cntr)
+ goto write_entry;
+
+ /* Now allocate counter */
+ cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (rc) {
+ /* Free allocated MCAM entry */
+ mutex_lock(&mcam->lock);
+ mcam->entry2pfvf_map[entry] = 0;
+ npc_mcam_clear_bit(mcam, entry);
+ mutex_unlock(&mcam->lock);
+ return rc;
+ }
+
+ cntr = cntr_rsp.cntr;
+
+write_entry:
+ mutex_lock(&mcam->lock);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->alloc_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+ mutex_unlock(&mcam->lock);
+
+ rsp->entry = entry;
+ rsp->cntr = cntr;
+
+ return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp)
+{
+ int lid, lt, ld, fl;
+
+ rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+ rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+ rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+ }
+ }
+ }
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+ rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+ }
+ }
+ return 0;
+}
+
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+ bool enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (!pfvf->rxvlan)
+ return 0;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+ pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+ NIX_INTF_RX, &pfvf->entry, enable);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b374f5e..c7cd0081058e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb_copy_hash(skb, re->skb);
- skb->vlan_proto = re->skb->vlan_proto;
- skb->vlan_tci = re->skb->vlan_tci;
+ __vlan_hwaccel_copy_tag(skb, re->skb);
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
- re->skb->vlan_proto = 0;
- re->skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(re->skb);
skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7dbfdac4067a..399f565dd85a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev)
phy_set_max_speed(dev->phydev, SPEED_1000);
phy_support_asym_pause(dev->phydev);
- dev->phydev->advertising = dev->phydev->supported |
- ADVERTISED_Autoneg;
+ linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ dev->phydev->advertising);
phy_start_aneg(dev->phydev);
of_node_put(np);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index d8e9a323122e..db909b6069b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -144,9 +144,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
- int cq_num)
+ int cq_num, u8 opmod)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+ return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
@@ -287,11 +287,61 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
__mlx4_cq_free_icm(dev, cqn);
}
+static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
+{
+ int entries_per_copy = PAGE_SIZE / cqe_size;
+ void *init_ents;
+ int err = 0;
+ int i;
+
+ init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!init_ents)
+ return -ENOMEM;
+
+ /* Populate a list of CQ entries to reduce the number of
+ * copy_to_user calls. 0xcc is the initialization value
+ * required by the FW.
+ */
+ memset(init_ents, 0xcc, PAGE_SIZE);
+
+ if (entries_per_copy < entries) {
+ for (i = 0; i < entries / entries_per_copy; i++) {
+ err = copy_to_user(buf, init_ents, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ buf += PAGE_SIZE;
+ }
+ } else {
+ err = copy_to_user(buf, init_ents, entries * cqe_size);
+ }
+
+out:
+ kfree(init_ents);
+
+ return err;
+}
+
+static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
+ int entries,
+ int cqe_size)
+{
+ int i;
+
+ if (buf->nbufs == 1)
+ memset(buf->direct.buf, 0xcc, entries * cqe_size);
+ else
+ for (i = 0; i < buf->npages; i++)
+ memset(buf->page_list[i].buf, 0xcc,
+ 1UL << buf->page_shift);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
struct mlx4_cq *cq, unsigned vector, int collapsed,
- int timestamp_en)
+ int timestamp_en, void *buf_addr, bool user_cq)
{
+ bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
struct mlx4_cmd_mailbox *mailbox;
@@ -336,7 +386,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
cq_context->db_rec_addr = cpu_to_be64(db_rec);
- err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+ if (sw_cq_init) {
+ if (user_cq) {
+ err = mlx4_init_user_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ if (err)
+ sw_cq_init = false;
+ } else {
+ mlx4_init_kernel_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ }
+ }
+
+ err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
+
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1e487acb4667..062a88fcc5d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -143,7 +143,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
- cq->vector, 0, timestamp_en);
+ cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
if (err)
goto free_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db00bf1c23f5..fd09ba98c0a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -875,7 +875,7 @@ csum_none:
skb->data_len = length;
napi_gro_frags(&cq->napi);
} else {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_clear_hash(skb);
}
next:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9c0571..7df728f1e5b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -166,6 +166,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
[39] = "Report driver version to FW support",
+ [40] = "SW CQ initialization support",
};
int i;
@@ -1098,6 +1099,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
if (field32 & (1 << 21))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
+ if (field32 & (1 << 23))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
for (i = 1; i <= dev_cap->num_ports; i++) {
err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeba2fbf..9afdf955f2bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -127,7 +127,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
else
#endif
if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+ up = skb_vlan_tag_get_prio(skb);
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 8a291eb36c64..080ddd1942ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -80,6 +80,7 @@ config MLXSW_SPECTRUM
depends on IPV6_GRE || IPV6_GRE=n
select GENERIC_ALLOCATOR
select PARMAN
+ select OBJAGG
select MLXFW
default m
---help---
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 785bf01fe2be..df78d23b3ec3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end)
+ char *key, char *mask)
{
+ unsigned int blocks_count =
+ mlxsw_afk_key_info_blocks_count_get(key_info);
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index, i;
- for (i = block_start; i <= block_end; i++) {
+ for (i = 0; i < blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
- if (key)
- mlxsw_afk->ops->encode_block(block_key, i, key);
- if (mask)
- mlxsw_afk->ops->encode_block(block_mask, i, mask);
+ mlxsw_afk->ops->encode_block(key, i, block_key);
+ mlxsw_afk->ops->encode_block(mask, i, block_mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end)
+{
+ int i;
+
+ for (i = block_start; i <= block_end; i++)
+ mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c29c045d826d..bcd264135af7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -188,7 +188,8 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
- void (*encode_block)(char *block, int block_index, char *output);
+ void (*encode_block)(char *output, int block_index, char *block);
+ void (*clear_block)(char *output, int block_index);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@@ -228,6 +229,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end);
+ char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 6d29dc428608..61f897b40f82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -16,6 +16,15 @@
#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MAX_DUTY 255
+/* Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values
+ * of MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the dynamic fan speed minimum. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10 to give PWM speeds in percent of: 40, 40, 40, 40, 40, 50, 60, 70,
+ * 80, 90, 100.
+ */
+#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
+#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
+#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
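A sketch of the arithmetic the comment above describes, with MLXSW_THERMAL_MAX_STATE == 10 and a requested value of 14:

    /* 14 encodes a dynamic minimum of 14 - 10 = 4, i.e. 40% duty. The
     * cooling levels vector becomes max(4, i) for i = 0..10:
     *   4 4 4 4 4 5 6 7 8 9 10
     * so any later request for a state below 4 is normalized up to 4.
     */
    unsigned long state = 14 - MLXSW_THERMAL_MAX_STATE;   /* = 4 */
    unsigned long i;

    for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
        thermal->cooling_levels[i] = max(state, i);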
struct mlxsw_thermal_trip {
int type;
@@ -68,6 +77,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
};
@@ -285,12 +295,51 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
+ unsigned long cur_state, i;
+ int idx;
+ u8 duty;
+ int err;
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
+ /* Verify if this request is for changing the allowed dynamic fan
+ * minimum. If it is, update the cooling levels accordingly and update
+ * the state, if the current state is below the newly requested minimum
+ * state. For example, if the current state is 5 and the minimal state
+ * is to be changed from 4 to 6, thermal->cooling_levels[0 to 5] will
+ * all be changed from 4 to 6. And state 5 (thermal->cooling_levels[4])
+ * should be overwritten.
+ */
+ if (state >= MLXSW_THERMAL_SPEED_MIN &&
+ state <= MLXSW_THERMAL_SPEED_MAX) {
+ state -= MLXSW_THERMAL_MAX_STATE;
+ for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(state, i);
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err)
+ return err;
+
+ duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+ cur_state = mlxsw_duty_to_state(duty);
+
+ /* If the current fan state is already above the requested dynamic
+ * minimum, leave it as is; otherwise raise the fan speed up to the
+ * dynamic minimum.
+ */
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
@@ -369,6 +418,11 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
}
+ /* Initialize cooling levels per PWM state. */
+ for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
+ i);
+
thermal->tzdev = thermal_zone_device_register("mlxsw",
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index db3d2790aeec..5c8a38f61a93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -641,6 +641,10 @@ enum mlxsw_reg_sfn_rec_type {
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
/* Aged-out MAC address on a LAG port. */
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
+ /* Learned unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD,
+ /* Aged-out unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE,
};
/* reg_sfn_rec_type
@@ -704,6 +708,66 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
*p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
}
+/* reg_sfn_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * address of the remote VTEP.
+ * When protocol is IPv6, reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24,
+ 8, MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfn_uc_tunnel_protocol {
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfn_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
+ 1, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 address of the remote VTEP.
+ * When protocol is IPv6, ipv6_id to be queried from TNIPSD.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
+ 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+enum mlxsw_reg_sfn_tunnel_port {
+ MLXSW_REG_SFN_TUNNEL_PORT_NVE,
+ MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_sfn_uc_tunnel_port
+ * Tunnel port.
+ * Reserved on Spectrum.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x10, false);
+
+static inline void
+mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac,
+ u16 *p_fid, u32 *p_uip,
+ enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto)
+{
+ u32 uip_msb, uip_lsb;
+
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index);
+ uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index);
+ *p_uip = uip_msb << 24 | uip_lsb;
+ *p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index);
+}
+
/* SPMS - Switch Port MSTP/RSTP State Register
* -------------------------------------------
* Configures the spanning tree state of a physical port.
@@ -2834,8 +2898,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
- bool large_exists, u32 lkey_id,
- u32 action_pointer)
+ u16 delta_start, u8 delta_mask,
+ u8 delta_value, bool large_exists,
+ u32 lkey_id, u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
@@ -2844,6 +2909,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+ mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+ mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
@@ -4231,8 +4299,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+ MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+ MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
@@ -4247,6 +4318,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x2: RFC 2819 Counters
* 0x3: RFC 3635 Counters
* 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
* 0x8: Link Level Retransmission Counters
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
@@ -4390,8 +4462,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* Ethernet RFC 2819 Counter Group */
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* reg_ppcnt_ether_stats_pkts64octets
* Access: RO
*/
@@ -4452,6 +4562,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4460,6 +4596,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_ITEM64(reg, ppcnt, ecn_marked,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bec940330a4..93378d507962 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1876,8 +1876,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+ {
+ .str = "if_in_discards",
+ .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+ },
+ {
+ .str = "if_out_discards",
+ .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+ },
+ {
+ .str = "if_out_errors",
+ .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
{
+ .str = "ether_stats_undersize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+ },
+ {
+ .str = "ether_stats_oversize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+ },
+ {
+ .str = "ether_stats_fragments",
+ .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+ },
+ {
.str = "ether_pkts64octets",
.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
},
@@ -1922,6 +1952,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+ {
+ .str = "dot3stats_fcs_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+ },
+ {
+ .str = "dot3stats_symbol_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+ },
+ {
+ .str = "dot3control_in_unknown_opcodes",
+ .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+ },
+ {
+ .str = "dot3in_pause_frames",
+ .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+ {
+ .str = "discard_ingress_general",
+ .getter = mlxsw_reg_ppcnt_ingress_general_get,
+ },
+ {
+ .str = "discard_ingress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+ },
+ {
+ .str = "discard_ingress_tag_frame_type",
+ .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+ },
+ {
+ .str = "discard_egress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+ },
+ {
+ .str = "discard_loopback_filter",
+ .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+ },
+ {
+ .str = "discard_egress_general",
+ .getter = mlxsw_reg_ppcnt_egress_general_get,
+ },
+ {
+ .str = "discard_egress_hoq",
+ .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+ },
+ {
+ .str = "discard_egress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_tx_link_down",
+ .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+ },
+ {
+ .str = "discard_egress_stp_filter",
+ .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+ },
+ {
+ .str = "discard_egress_sll",
+ .getter = mlxsw_reg_ppcnt_egress_sll_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1974,7 +2080,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
@@ -2015,12 +2124,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2063,10 +2191,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2863_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_RFC_2819_CNT:
*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_3635_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_DISCARD_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+ *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2116,11 +2256,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2863 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
/* RFC 2819 Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ /* RFC 3635 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+ /* Discard Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -3956,16 +4111,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
+/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
+ * 802.1Q FIDs
+ */
+#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
+ VLAN_VID_MASK - 1)
+
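Not part of the patch: a quick sketch of the arithmetic behind the new define, assuming VLAN_VID_MASK (0xfff) from <linux/if_vlan.h>; the EXAMPLE_* names are invented.

/* 4K-2 emulated 802.1Q FIDs, since VIDs 0 and 4095 are not usable. */
#define EXAMPLE_8021Q_EMU_FIDS		(VLAN_VID_MASK - 1)	/* 4094 */
#define EXAMPLE_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 EXAMPLE_8021Q_EMU_FIDS)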
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3988,10 +4147,8 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0875a79cbe7b..244972bf8b0a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -721,6 +721,10 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index);
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni);
int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
@@ -728,7 +732,7 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
u32 nve_flood_index);
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni, int nve_ifindex);
void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
@@ -810,6 +814,9 @@ struct mlxsw_sp_nve_params {
extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3e8f27..62e6cf4bc16e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
return 0;
}
@@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
static const struct mlxsw_sp_acl_ctcam_region_ops
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 2dda028f94db..e7bd8733e58e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -14,8 +14,8 @@
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ char *enc_key, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
@@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+ return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
}
static int
@@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
@@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
- NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
- MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+ mlxsw_afk_clear(afk, ht_key.enc_key,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
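Not part of the patch: a hypothetical helper sketching why the copy-and-clear approach is equivalent to the old partial encode; afk and enc_key stand for the same handles used in the function above.

static void example_lkey_from_enc_key(struct mlxsw_afk *afk,
				      const char *enc_key)
{
	char lkey[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];

	memcpy(lkey, enc_key, sizeof(lkey));
	/* Zeroing blocks 0..5 of the full encoded key leaves the same
	 * MSB-only key that encoding blocks 6..11 used to produce.
	 */
	mlxsw_afk_clear(afk, lkey,
			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
			MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
}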
@@ -379,7 +378,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
u32 kvdl_index, priority;
@@ -389,7 +388,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -398,6 +398,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@@ -418,12 +421,17 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
- region->tcam_region_info, aentry->ht_key.enc_key,
- erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ region->tcam_region_info,
+ enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
@@ -438,19 +446,30 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
- struct mlxsw_sp_acl_erp *erp;
- unsigned int blocks_count;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
int err;
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
-
- erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
- aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+ aentry->full_enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+ memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+ sizeof(aentry->ht_key.enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encoded key.
+ */
+ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
@@ -472,7 +491,7 @@ err_rule_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
- mlxsw_sp_acl_erp_put(aregion, erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
return err;
}
@@ -484,7 +503,7 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8b1d40..f3e834bfea1a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
- blocks_count - 1);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
err = cregion->ops->entry_insert(cregion, centry, mask);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 0a4fd3c8662a..d9a4b7e8434b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,7 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/rhashtable.h>
+#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -29,6 +29,8 @@ struct mlxsw_sp_acl_erp_core {
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
bool ctcam;
};
@@ -36,10 +38,8 @@ struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
- refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
- struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
@@ -53,7 +53,6 @@ struct mlxsw_sp_acl_erp_table {
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
- struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
@@ -61,12 +60,8 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
-};
-
-static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
- .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
- .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
- .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+ unsigned int num_deltas;
+ struct objagg *objagg;
};
struct mlxsw_sp_acl_erp_table_ops {
@@ -119,16 +114,6 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
-{
- return erp->key.ctcam;
-}
-
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
-{
- return erp->id;
-}
-
static unsigned int
mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
{
@@ -194,12 +179,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
@@ -210,7 +198,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
@@ -218,12 +206,15 @@ err_master_mask_update:
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
@@ -234,7 +225,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
@@ -256,26 +247,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
- bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
- MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
- refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_insert;
-
return erp;
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
@@ -290,9 +271,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@@ -647,9 +626,56 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *inc_num)
+{
+ int err;
+
+ /* If there are C-TCAM eRPs or deltas in use, we need to transition
+ * the region to use the eRP table, if that has not been done already.
+ */
+ if (erp_table->ops != &erp_two_masks_ops &&
+ erp_table->ops != &erp_multiple_masks_ops) {
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return err;
+ }
+
+ /* When the C-TCAM or deltas are in use, the eRP table must be used */
+ if (erp_table->ops != &erp_multiple_masks_ops)
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ (*inc_num)++;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_deltas);
+}
+
static void
-mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *dec_num)
{
+ (*dec_num)--;
+
+ /* If there are no C-TCAM eRPs or deltas in use, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use.
+ */
+ if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+ return;
+
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
@@ -683,9 +709,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
}
}
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_deltas);
+}
+
static struct mlxsw_sp_acl_erp *
-__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
@@ -697,89 +735,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
- refcount_set(&erp->refcnt, 1);
- erp_table->num_ctcam_erps++;
- erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
if (err)
- goto err_master_mask_set;
+ goto err_erp_ctcam_inc;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
- goto err_rhashtable_insert;
+ goto err_master_mask_set;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
- /* When C-TCAM is used, the eRP table must be used */
- erp_table->ops = &erp_multiple_masks_ops;
-
return erp;
err_erp_region_ctcam_enable:
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
err_master_mask_set:
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
kfree(erp);
return ERR_PTR(err);
}
-static struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
-{
- struct mlxsw_sp_acl_erp *erp;
- int err;
-
- /* There is a special situation where we need to spill rules
- * into the C-TCAM, yet the region is still using a master
- * mask and thus not performing a lookup in the C-TCAM. This
- * can happen when two rules that only differ in priority - and
- * thus sharing the same key - are programmed. In this case
- * we transition the region to use an eRP table
- */
- err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
- if (err)
- return ERR_PTR(err);
-
- erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
- if (IS_ERR(erp)) {
- err = PTR_ERR(erp);
- goto err_erp_create;
- }
-
- return erp;
-
-err_erp_create:
- mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
- return ERR_PTR(err);
-}
-
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
kfree(erp);
-
- /* Once the last C-TCAM eRP was destroyed, the state we
- * transition to depends on the number of A-TCAM eRPs currently
- * in use
- */
- if (erp_table->num_ctcam_erps > 0)
- return;
- mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
@@ -790,7 +780,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
int err;
if (key->ctcam)
- return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
@@ -838,7 +828,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
- if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+ erp_table->num_deltas == 0)
erp_table->ops = &erp_two_masks_ops;
}
@@ -940,13 +931,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
WARN_ON(1);
}
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam)
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
- struct mlxsw_sp_acl_erp *erp;
+ struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
@@ -955,29 +945,238 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
- erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
- mlxsw_sp_acl_erp_ht_params);
- if (erp) {
- refcount_inc(&erp->refcnt);
- return erp;
+ objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+ if (IS_ERR(objagg_obj))
+ return ERR_CAST(objagg_obj);
+ return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
+}
+
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+
+ ASSERT_RTNL();
+ objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
+}
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+ return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+ return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+ struct mlxsw_sp_acl_erp_key key;
+ u16 start;
+ u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->mask;
+}
+
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ u16 tmp;
+
+ if (!mask)
+ return 0;
+
+ tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+ if (start / 8 + 1 < __MASK_LEN)
+ tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+ tmp >>= start % 8;
+ tmp &= mask;
+ return tmp;
+}
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ unsigned char *byte;
+ u16 tmp;
+
+ tmp = mask;
+ tmp <<= start % 8;
+ tmp = ~tmp;
+
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+ *byte &= tmp & 0xff;
+ if (start / 8 + 1 < __MASK_LEN) {
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+ *byte &= (tmp >> 8) & 0xff;
}
+}
+
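Not part of the patch: a worked example of the extraction above with made-up values, assuming a delta that starts at key bit 4 and is four bits wide; the helper name is invented.

static u8 example_delta_value(void)
{
	unsigned char enc_key[__MASK_LEN] = { 0 };
	u16 start = 4;	/* delta starts at key bit 4... */
	u8 mask = 0xf;	/* ...and covers four bits */
	u16 tmp;

	enc_key[__MASK_IDX(0)] = 0x5a;

	tmp = enc_key[__MASK_IDX(start / 8)];		/* 0x5a */
	tmp |= enc_key[__MASK_IDX(start / 8 + 1)] << 8;	/* zero here */
	tmp >>= start % 8;				/* 0x05 */
	return tmp & mask;				/* delta value == 0x5 */
}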
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
- return erp_table->ops->erp_create(erp_table, &key);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (!delta)
+ delta = &mlxsw_sp_acl_erp_delta_default;
+ return delta;
}
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp)
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ const struct mlxsw_sp_acl_erp_key *key,
+ u16 *delta_start, u8 *delta_mask)
{
+ int offset = 0;
+ int si = -1;
+ u16 pmask;
+ u16 mask;
+ int i;
+
+ /* The difference between 2 masks can be up to 8 consecutive bits. */
+ for (i = 0; i < __MASK_LEN; i++) {
+ if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+ continue;
+ if (si == -1)
+ si = i;
+ else if (si != i - 1)
+ return -EINVAL;
+ }
+ if (si == -1) {
+ /* The masks are the same, which cannot happen.
+ * It means the caller is broken.
+ */
+ WARN_ON(1);
+ *delta_start = 0;
+ *delta_mask = 0;
+ return 0;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+ if (si + 1 < __MASK_LEN) {
+ pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+ mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+ }
+
+ if ((pmask ^ mask) & pmask)
+ return -EINVAL;
+ mask &= ~pmask;
+ while (!(mask & (1 << offset)))
+ offset++;
+ while (!(mask & 1))
+ mask >>= 1;
+ if (mask & 0xff00)
+ return -EINVAL;
+
+ *delta_start = si * 8 + offset;
+ *delta_mask = mask;
+
+ return 0;
+}
+
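Not part of the patch: a numeric example of what the fill routine computes; the masks and the helper name are invented for illustration.

static void example_delta_fill(void)
{
	/* Parent mask 0xf0 vs. child mask 0xfc in key byte 5, all other
	 * bytes equal: the child only adds bits 2 and 3, so it can hang
	 * off the parent eRP as a delta.
	 */
	unsigned char parent_byte = 0xf0, child_byte = 0xfc;
	unsigned char diff = child_byte & ~parent_byte;	/* 0x0c */
	u16 delta_start = 5 * 8 + 2;			/* 42: first added bit */
	u8 delta_mask = diff >> 2;			/* 0x3 */

	/* A child mask that clears bits the parent has set, or whose added
	 * bits span more than 8 consecutive positions, makes the fill
	 * routine return -EINVAL and the mask then gets a root eRP of its
	 * own.
	 */
	(void) delta_start;
	(void) delta_mask;
}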
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+{
+ struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+ struct mlxsw_sp_acl_erp_delta *delta;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
- ASSERT_RTNL();
+ if (parent_key->ctcam || key->ctcam)
+ return ERR_PTR(-EINVAL);
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ if (err)
+ return ERR_PTR(-EINVAL);
- if (!refcount_dec_and_test(&erp->refcnt))
- return;
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->start = delta_start;
+ delta->mask = delta_mask;
+
+ err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+ if (err)
+ goto err_erp_delta_inc;
+
+ memcpy(&delta->key, key, sizeof(*key));
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return delta;
- erp_table->ops->erp_destroy(erp_table, erp);
+err_master_mask_set:
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+ kfree(delta);
+ return ERR_PTR(err);
}
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+ struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+ kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+
+ return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ erp_table->ops->erp_destroy(erp_table, root_priv);
+}
+
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
+ .root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
+
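Not part of the patch: a rough sketch of how the objagg instance created below drives these callbacks; key_a/key_b are placeholders and error handling for objagg_obj_get() is omitted.

static int example_erp_objagg_usage(struct mlxsw_sp_acl_atcam_region *aregion)
{
	struct mlxsw_sp_acl_erp_key key_a = {}, key_b = {};
	struct objagg_obj *root_obj, *second_obj;
	struct objagg *objagg;

	objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops, aregion);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);
	/* First mask: no parent exists yet, so root_create() allocates an
	 * eRP.
	 */
	root_obj = objagg_obj_get(objagg, &key_a);
	/* Second mask: if it only adds a few consecutive bits to key_a,
	 * delta_create() succeeds and the object shares key_a's root eRP;
	 * otherwise it becomes a root of its own.
	 */
	second_obj = objagg_obj_get(objagg, &key_b);

	objagg_obj_put(objagg, second_obj);
	objagg_obj_put(objagg, root_obj);
	objagg_destroy(objagg);
	return 0;
}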
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
@@ -988,9 +1187,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
if (!erp_table)
return ERR_PTR(-ENOMEM);
- err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_init;
+ erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+ aregion);
+ if (IS_ERR(erp_table->objagg)) {
+ err = PTR_ERR(erp_table->objagg);
+ goto err_objagg_create;
+ }
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
@@ -999,7 +1201,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return erp_table;
-err_rhashtable_init:
+err_objagg_create:
kfree(erp_table);
return ERR_PTR(err);
}
@@ -1008,7 +1210,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
- rhashtable_destroy(&erp_table->erp_ht);
+ objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index d409b09ba8df..2e1e8c4b3922 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
-static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
- char *output)
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+ char *block)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
@@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_block = mlxsw_sp1_afk_encode_block,
+ .clear_block = mlxsw_sp1_afk_clear_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@@ -263,10 +272,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
-static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
- char *output)
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+ u64 block_value)
{
- u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
@@ -278,8 +286,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
&block_layout->item, 0, block_value);
}
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+ __mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+ __mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 219a4e26c332..9a73759d901f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -154,7 +154,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
u8 erp_id;
};
@@ -165,9 +167,15 @@ struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ struct {
+ u16 start;
+ u8 mask;
+ u8 value;
+ } delta_info;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
};
static inline struct mlxsw_sp_acl_atcam_region *
@@ -209,15 +217,27 @@ int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
-struct mlxsw_sp_acl_erp;
-
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam);
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp_delta;
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index a3db033d7399..6830e79aed93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -15,6 +15,7 @@
struct mlxsw_sp_fid_family;
struct mlxsw_sp_fid_core {
+ struct rhashtable fid_ht;
struct rhashtable vni_ht;
struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
unsigned int *port_fid_mappings;
@@ -26,10 +27,12 @@ struct mlxsw_sp_fid {
unsigned int ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
+ struct rhash_head ht_node;
struct rhash_head vni_ht_node;
__be32 vni;
u32 nve_flood_index;
+ int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
};
@@ -44,6 +47,12 @@ struct mlxsw_sp_fid_8021d {
int br_ifindex;
};
+static const struct rhashtable_params mlxsw_sp_fid_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, fid_index),
+ .key_offset = offsetof(struct mlxsw_sp_fid, fid_index),
+ .head_offset = offsetof(struct mlxsw_sp_fid, ht_node),
+};
+
static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
.key_len = sizeof_field(struct mlxsw_sp_fid, vni),
.key_offset = offsetof(struct mlxsw_sp_fid, vni),
@@ -89,6 +98,7 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
+ u8 lag_vid_valid:1;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -113,6 +123,34 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->lag_vid_valid;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
+ mlxsw_sp_fid_ht_params);
+ if (fid)
+ fid->ref_count++;
+
+ return fid;
+}
+
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *nve_ifindex = fid->nve_ifindex;
+
+ return 0;
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni)
{
@@ -173,7 +211,7 @@ bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
return fid->nve_flood_index_valid;
}
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni, int nve_ifindex)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
@@ -183,6 +221,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
if (WARN_ON(!ops->vni_set || fid->vni_valid))
return -EINVAL;
+ fid->nve_ifindex = nve_ifindex;
fid->vni = vni;
err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
&fid->vni_ht_node,
@@ -568,7 +607,7 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
{
- return fid->fid_index - fid->fid_family->start_index;
+ return fid->fid_index - VLAN_N_VID;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -759,6 +798,40 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
+ .lag_vid_valid = 1,
+};
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+};
+
+/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
+#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
+#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
+ VLAN_VID_MASK - 2)
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_EMU_START,
+ .end_index = MLXSW_SP_FID_8021Q_EMU_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_emu_ops,
+ .lag_vid_valid = 1,
};
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
@@ -888,7 +961,7 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
};
static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
@@ -944,10 +1017,17 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
+ err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node,
+ mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
list_add(&fid->list, &fid_family->fids_list);
fid->ref_count++;
return fid;
+err_rhashtable_insert:
+ fid->fid_family->ops->deconfigure(fid);
err_configure:
__clear_bit(fid_index - fid_family->start_index,
fid_family->fids_bitmap);
@@ -959,6 +1039,7 @@ err_index_alloc:
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
if (--fid->ref_count == 1 && fid->rif) {
/* Destroy the associated RIF and let it drop the last
@@ -967,6 +1048,8 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
return mlxsw_sp_rif_destroy(fid->rif);
} else if (fid->ref_count == 0) {
list_del(&fid->list);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht,
+ &fid->ht_node, mlxsw_sp_fid_ht_params);
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
@@ -1126,9 +1209,13 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->fid_core = fid_core;
+ err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_fid_init;
+
err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_rhashtable_vni_init;
fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
GFP_KERNEL);
@@ -1157,7 +1244,9 @@ err_fid_ops_register:
kfree(fid_core->port_fid_mappings);
err_alloc_port_fid_mappings:
rhashtable_destroy(&fid_core->vni_ht);
-err_rhashtable_init:
+err_rhashtable_vni_init:
+ rhashtable_destroy(&fid_core->fid_ht);
+err_rhashtable_fid_init:
kfree(fid_core);
return err;
}
@@ -1172,5 +1261,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
fid_core->fid_family_arr[i]);
kfree(fid_core->port_fid_mappings);
rhashtable_destroy(&fid_core->vni_ht);
+ rhashtable_destroy(&fid_core->fid_ht);
kfree(fid_core);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index ad06d9969bc1..c4d5a0865c8f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -174,6 +174,20 @@ mlxsw_sp_nve_mc_record_ops_arr[] = {
[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
};
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr->addr4 = cpu_to_be32(uip);
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_mc_list_key *key)
@@ -803,7 +817,7 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
return err;
}
- err = mlxsw_sp_fid_vni_set(fid, params->vni);
+ err = mlxsw_sp_fid_vni_set(fid, params->vni, params->dev->ifindex);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
goto err_fid_vni_set;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d21c7be5b1c9..4e9cc00a88fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -17,7 +17,8 @@
#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_LEARN)
static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct net_device *dev,
@@ -61,11 +62,6 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (cfg->flags & VXLAN_F_LEARN) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
- return false;
- }
-
if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
return false;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9e9bb57134f2..1557c5fc6d10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -7296,6 +7296,15 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
@@ -7364,7 +7373,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 739a51f0a366..3c2428404b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1968,8 +1968,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
- .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
- .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
};
static int
@@ -2312,6 +2310,71 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
}
static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ union vxlan_addr *vxlan_addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ vxlan_addr->sa.sa_family = AF_INET;
+ vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ vxlan_addr->sa.sa_family = AF_INET6;
+ vxlan_addr->sin6.sin6_addr = addr->addr6;
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni, bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ enum switchdev_notifier_type type;
+
+ type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
+ info.remote_port = vxlan->cfg.dst_port;
+ info.remote_vni = vni;
+ info.remote_ifindex = 0;
+ ether_addr_copy(info.eth_addr, mac);
+ info.vni = vni;
+ info.offloaded = adding;
+ call_switchdev_notifiers(type, dev, &info.info);
+}
+
+static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni,
+ bool adding)
+{
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
+ adding);
+}
+
+static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
struct net_device *dev, bool offloaded)
@@ -2419,7 +2482,8 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_port_vlan->vid;
+ lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
+ mlxsw_sp_port_vlan->vid : 0;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
@@ -2442,6 +2506,122 @@ just_remove:
goto do_fdb_op;
}
+static int
+__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ bool adding,
+ struct net_device **nve_dev,
+ u16 *p_vid, __be32 *p_vni)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *br_dev, *dev;
+ int nve_ifindex;
+ int err;
+
+ err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni(fid, p_vni);
+ if (err)
+ return err;
+
+ dev = __dev_get_by_index(&init_net, nve_ifindex);
+ if (!dev)
+ return -EINVAL;
+ *nve_dev = dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
+ return -EINVAL;
+
+ if (adding && netif_is_vxlan(dev)) {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
+ return -EINVAL;
+ }
+
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return -EINVAL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl,
+ int rec_index,
+ bool adding)
+{
+ enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
+ enum switchdev_notifier_type type;
+ struct net_device *nve_dev;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ char mac[ETH_ALEN];
+ u16 fid_index, vid;
+ __be32 vni;
+ u32 uip;
+ int err;
+
+ mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
+ &uip, &sfn_proto);
+
+ fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
+ if (!fid)
+ goto err_fid_lookup;
+
+ err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr);
+ if (err)
+ goto err_ip_resolve;
+
+ err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
+ &nve_dev, &vid, &vni);
+ if (err)
+ goto err_fdb_process;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, adding, true);
+ if (err)
+ goto err_fdb_op;
+
+ mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, vni, adding);
+
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_op:
+err_fdb_process:
+err_ip_resolve:
+ mlxsw_sp_fid_put(fid);
+err_fid_lookup:
+ /* Remove an FDB entry in case we cannot process it. Otherwise the
+ * device will keep sending the same notification over and over again.
+ */
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto, &addr,
+ false, true);
+}
+
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index)
{
@@ -2462,6 +2642,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
rec_index, false);
break;
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
}
}
@@ -2517,20 +2705,6 @@ struct mlxsw_sp_switchdev_event_work {
};
static void
-mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
- enum mlxsw_sp_l3proto *proto,
- union mlxsw_sp_l3addr *addr)
-{
- if (vxlan_addr->sa.sa_family == AF_INET) {
- addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV4;
- } else {
- addr->addr6 = vxlan_addr->sin6.sin6_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV6;
- }
-}
-
-static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_switchdev_event_work *
switchdev_work,
@@ -2595,7 +2769,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
return;
- if (!switchdev_work->fdb_info.added_by_user)
+ if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ !switchdev_work->fdb_info.added_by_user)
return;
if (!netif_running(dev))
@@ -2942,6 +3117,32 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = {
.notifier_call = mlxsw_sp_switchdev_event,
};
+static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_blocking_event,
+};
+
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
@@ -2951,6 +3152,7 @@ mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ struct notifier_block *nb;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -2965,17 +3167,33 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
return err;
}
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
+
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ return err;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct notifier_block *nb;
+
cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
- unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
+
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3238b9ee42f3..7f8da8873a96 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1337,8 +1337,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
static const struct switchdev_ops ocelot_port_switchdev_ops = {
.switchdev_port_attr_get = ocelot_port_attr_get,
.switchdev_port_attr_set = ocelot_port_attr_set,
- .switchdev_port_obj_add = ocelot_port_obj_add,
- .switchdev_port_obj_del = ocelot_port_obj_del,
};
static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
@@ -1595,6 +1593,34 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_netdevice_nb);
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ /* Blocking events. */
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_blocking_event,
+};
+EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 62c7c8eb00d9..086775f7b52f 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -499,5 +499,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
struct phy_device *phy);
extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_blocking_nb;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 4c23d18bbf44..ca3ea2fbfcd0 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
+#include <net/switchdev.h>
#include "ocelot.h"
@@ -328,6 +329,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
register_netdevice_notifier(&ocelot_netdevice_nb);
+ register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -342,6 +344,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit(ocelot);
+ unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
return 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index f7a0d1d5885e..59e77e3086bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1695,17 +1695,10 @@ exit:
*/
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- u32 max_frags;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
- (struct vxge_hw_fifo_txd *)txdlh);
-
- max_frags = fifo->config->max_frags;
-
vxge_hw_channel_dtr_free(channel, txdlh);
}
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4afb10375397..47c708f08ade 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -56,7 +56,9 @@ endif
ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
+ abm/cls.o \
abm/ctrl.o \
+ abm/qdisc.o \
abm/main.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
new file mode 100644
index 000000000000..9852080cf454
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_net_repr.h"
+#include "main.h"
+
+struct nfp_abm_u32_match {
+ u32 handle;
+ u32 band;
+ u8 mask;
+ u8 val;
+ struct list_head list;
+};
+
+static bool
+nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct tc_u32_key *k;
+ unsigned int tos_off;
+
+ if (knode->exts && tcf_exts_has_actions(knode->exts)) {
+ NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
+ return false;
+ }
+ if (knode->link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "linking not supported");
+ return false;
+ }
+ if (knode->sel->flags != TC_U32_TERMINAL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flags must be equal to TC_U32_TERMINAL");
+ return false;
+ }
+ if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
+ knode->sel->offoff || knode->fshift) {
+ NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ return false;
+ }
+ if (knode->sel->hoff || knode->sel->hmask) {
+ NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
+ return false;
+ }
+ if (knode->val || knode->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
+ return false;
+ }
+ if (knode->res && knode->res->class) {
+ NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
+ return false;
+ }
+ if (knode->res && knode->res->classid >= abm->num_bands) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "classid higher than number of bands");
+ return false;
+ }
+ if (knode->sel->nkeys != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
+ return false;
+ }
+
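+ /* The u32 key matches the first 32-bit word of the IP header; once the
+  * word is converted to host order, the TOS / Traffic Class byte sits at
+  * bit offset 16 for IPv4 and 20 for IPv6.
+  */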
+ switch (proto) {
+ case htons(ETH_P_IP):
+ tos_off = 16;
+ break;
+ case htons(ETH_P_IPV6):
+ tos_off = 20;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
+ return false;
+ }
+
+ k = &knode->sel->keys[0];
+ if (k->offmask) {
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ return false;
+ }
+ if (k->off) {
+ NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
+ return false;
+ }
+ if (k->val & ~k->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
+ return false;
+ }
+ if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
+ NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
+ nfp_err(abm->app->cpp,
+ "u32 offload: requested mask %x FW can support only %x\n",
+ be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* This filter list -> map conversion is O(n * m), but we expect a single- or
+ * low double-digit number of prios and likewise for the filters. Also, u32
+ * doesn't report stats, so it's really only a setup-time cost.
+ */
+static unsigned int
+nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if ((prio & iter->mask) == iter->val)
+ return iter->band;
+
+ return alink->def_band;
+}
+
+static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
+{
+ unsigned int i, bits_per_prio, prios_per_word, base_shift;
+ struct nfp_abm *abm = alink->abm;
+ u32 field_mask;
+
+ alink->has_prio = !list_empty(&alink->dscp_map);
+
+ bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
+ field_mask = (1 << bits_per_prio) - 1;
+ prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;
+
+ /* FW mask applies from top bits */
+ base_shift = 8 - order_base_2(abm->num_prios);
+
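+ /* Example packing, assuming 4 bands: each priority consumes 2 bits,
+  * so 16 priorities fit in every 32-bit word of the map.
+  */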
+ for (i = 0; i < abm->num_prios; i++) {
+ unsigned int offset;
+ u32 *word;
+ u8 band;
+
+ word = &alink->prio_map[i / prios_per_word];
+ offset = (i % prios_per_word) * bits_per_prio;
+
+ band = nfp_abm_find_band_for_prio(alink, i << base_shift);
+
+ *word &= ~(field_mask << offset);
+ *word |= band << offset;
+ }
+
+ /* Qdisc offload status may change if has_prio changed */
+ nfp_abm_qdisc_offload_update(alink);
+
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+}
+
+static void
+nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if (iter->handle == knode->handle) {
+ list_del(&iter->list);
+ kfree(iter);
+ nfp_abm_update_band_map(alink);
+ return;
+ }
+}
+
+static int
+nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct nfp_abm_u32_match *match = NULL, *iter;
+ unsigned int tos_off;
+ u8 mask, val;
+ int err;
+
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ goto err_delete;
+
+ tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+
+ /* Extract the DSCP Class Selector bits */
+ val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
+ mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
+
+ /* Check that there is no conflicting mapping and find the match by handle */
+ list_for_each_entry(iter, &alink->dscp_map, list) {
+ u32 cmask;
+
+ if (iter->handle == knode->handle) {
+ match = iter;
+ continue;
+ }
+
+ cmask = iter->mask & mask;
+ if ((iter->val & cmask) == (val & cmask) &&
+ iter->band != knode->res->classid) {
+ NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+ goto err_delete;
+ }
+ }
+
+ if (!match) {
+ match = kzalloc(sizeof(*match), GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+ list_add(&match->list, &alink->dscp_map);
+ }
+ match->handle = knode->handle;
+ match->band = knode->res->classid;
+ match->mask = mask;
+ match->val = val;
+
+ err = nfp_abm_update_band_map(alink);
+ if (err)
+ goto err_delete;
+
+ return 0;
+
+err_delete:
+ nfp_abm_u32_knode_delete(alink, knode);
+ return -EOPNOTSUPP;
+}
+
+static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_u32_offload *cls_u32 = type_data;
+ struct nfp_repr *repr = cb_priv;
+ struct nfp_abm_link *alink;
+
+ alink = repr->app_priv;
+
+ if (type != TC_SETUP_CLSU32) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only offload of u32 classifier supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
+ return -EOPNOTSUPP;
+
+ if (cls_u32->common.protocol != htons(ETH_P_IP) &&
+ cls_u32->common.protocol != htons(ETH_P_IPV6)) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only IP and IPv6 supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
+ cls_u32->common.protocol,
+ cls_u32->common.extack);
+ case TC_CLSU32_DELETE_KNODE:
+ nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..ad6c2a621c7a 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
@@ -11,38 +13,56 @@
#include "../nfp_net.h"
#include "main.h"
-#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
+#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u"
+#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u"
+#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u"
+
+#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE 16
#define NFP_QLVL_BLOG_BYTES 0
#define NFP_QLVL_BLOG_PKTS 4
#define NFP_QLVL_THRS 8
+#define NFP_QLVL_ACT 12
-#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE 32
#define NFP_QMSTAT_NON_STO 0
#define NFP_QMSTAT_STO 8
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
+#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s"
+#define NFP_Q_STAT_STRIDE 16
+#define NFP_Q_STAT_PKTS 0
+#define NFP_Q_STAT_BYTES 8
+
+#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD
+#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET
+#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
+#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
+
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, unsigned int i,
- bool is_u64, u64 *res)
+ unsigned int stride, unsigned int offset, unsigned int band,
+ unsigned int queue, bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
u64 val, sym_offset;
+ unsigned int qid;
u32 val32;
int err;
- sym_offset = (alink->queue_base + i) * stride + offset;
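+ /* Per-band symbols are laid out as num_bands blocks of
+  * NFP_NET_MAX_RX_RINGS entries; index by band first, then by the
+  * absolute host queue id.
+  */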
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ sym_offset = qid * stride + offset;
if (is_u64)
err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
- nfp_err(cpp,
- "RED offload reading stat failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
+ alink->id, band, queue, alink->queue_base);
return err;
}
@@ -50,175 +70,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- u64 val, sum = 0;
- unsigned int i;
+ struct nfp_cpp *cpp = abm->app->cpp;
+ u64 sym_offset;
int err;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
+ if (err) {
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
+ return err;
}
- *res = sum;
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int threshold;
+
+ threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
+}
+
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act)
+{
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ if (abm->actions[id] == act)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting action failed on subqueue %d\n",
+ id);
return err;
}
+ abm->actions[id] = act;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act)
{
- int i, err;
+ unsigned int qid;
+
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
+ return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
+}
+
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
+{
+ unsigned int band;
+ u64 val, sum = 0;
+
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
}
- return 0;
+ return sum;
}
-u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- u64 val;
+ unsigned int band;
+ u64 val, sum = 0;
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_NON_STO, i, true, &val))
- return 0;
- return val;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
+ }
+
+ return sum;
}
-u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+static int
+nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, unsigned int off, u64 *val)
{
- u64 val;
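+ /* Legacy FW exposes no per-band stats symbol; basic packet/byte
+  * counts then come from the vNIC BAR and only band 0 is meaningful.
+  */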
+ if (!nfp_abm_has_prio(alink->abm)) {
+ if (!band) {
+ unsigned int id = alink->queue_base + queue;
+
+ *val = nn_readq(alink->vnic,
+ NFP_NET_CFG_RXR_STATS(id) + off);
+ } else {
+ *val = 0;
+ }
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_STO, i, true, &val))
return 0;
- return val;
+ } else {
+ return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
+ NFP_Q_STAT_STRIDE, off, band, queue,
+ true, val);
+ }
}
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
- struct nfp_alink_stats *stats)
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *stats)
{
int err;
- stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
+ &stats->tx_pkts);
+ if (err)
+ return err;
- err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- i, false, &stats->backlog_bytes);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
+ &stats->tx_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
+ NFP_QLVL_BLOG_BYTES, band, queue, false,
+ &stats->backlog_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- i, false, &stats->backlog_pkts);
+ band, queue, false, &stats->backlog_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &stats->drops);
+ band, queue, true, &stats->drops);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &stats->overlimits);
-}
-
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
+ band, queue, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats)
{
int err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &xstats->pdrop);
+ band, queue, true, &xstats->pdrop);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &xstats->ecn_marked);
-}
-
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
+ band, queue, true, &xstats->ecn_marked);
}
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@@ -233,10 +257,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
NULL, 0, NULL, 0);
}
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
+{
+ struct nfp_net *nn = alink->vnic;
+ unsigned int i;
+ int err;
+
+ /* Write data_len and wipe reserved */
+ nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
+ alink->abm->prio_map_len);
+
+ for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
+ packed[i / sizeof(u32)]);
+
+ err = nfp_net_reconfig_mbox(nn,
+ NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ if (err)
+ nfp_err(alink->abm->app->cpp,
+ "setting DSCP -> VQ map failed with error %d\n", err);
+ return err;
+}
+
+static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct nfp_net *nn = alink->vnic;
+ unsigned int min_mbox_sz;
+
+ if (!nfp_abm_has_prio(alink->abm))
+ return 0;
+
+ min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
+ if (nn->tlv_caps.mbox_len < min_mbox_sz) {
+ nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
+ nn->tlv_caps.mbox_len, min_mbox_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
alink->queue_base /= alink->vnic->stride_rx;
+
+ return nfp_abm_ctrl_prio_check_params(alink);
+}
+
+static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
+{
+ unsigned int size;
+
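+ /* e.g. 4 bands and 64 priorities: 2 bits * 64 = 16 bytes, already
+  * a multiple of u32
+  */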
+ size = roundup_pow_of_two(order_base_2(abm->num_bands));
+ size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
+ size = round_up(size, sizeof(u32));
+
+ return size;
}
static const struct nfp_rtsym *
@@ -260,33 +338,77 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
}
static const struct nfp_rtsym *
-nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
- unsigned int size)
+nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
+ size_t size)
{
- return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+ char pf_symbol[64];
+
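+ /* Per-queue symbols cover every band on every ring; FW with DSCP
+  * priority support exposes them under a "_per_band" suffix.
+  */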
+ size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
+ abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
+
+ return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
struct nfp_pf *pf = abm->app->pf;
const struct nfp_rtsym *sym;
- unsigned int pf_id;
- char pf_symbol[64];
-
- pf_id = nfp_cppcore_pcie_unit(pf->cpp);
- abm->pf_id = pf_id;
+ int res;
+
+ abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+
+ /* Read count of prios and prio bands */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_bands = res;
+
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_prios = res;
+
+ /* Read available actions */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
+ BIT(NFP_ABM_ACT_MARK_DROP));
+ if (res < 0)
+ return res;
+ abm->action_mask = res;
+
+ abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
+ abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
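+ /* e.g. with 64 priorities the mask is 0xfc - the full 6-bit DSCP
+  * field, excluding the ECN bits
+  */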
+
+ /* Check values are sane, U16_MAX is arbitrarily chosen as max */
+ if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
+ abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
+ (abm->num_bands == 1) != (abm->num_prios == 1)) {
+ nfp_err(pf->cpp,
+ "invalid priomap description num bands: %u and num prios: %u\n",
+ abm->num_bands, abm->num_prios);
+ return -EINVAL;
+ }
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+ /* Find level and stat symbols */
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
+ NFP_QLVL_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_lvls = sym;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
+ NFP_QMSTAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->qm_stats = sym;
+ if (nfp_abm_has_prio(abm)) {
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
+ NFP_Q_STAT_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->q_stats = sym;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0c2c3f..7a4d55f794c2 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
}
static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
-{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
-
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
-
- return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
-{
- __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
-
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
-
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
-
- return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
- return;
-
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
- }
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- bool existing;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
-
- if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
- nfp_warn(alink->abm->app->cpp,
- "RED offload failed - unsupported parameters\n");
- err = -EINVAL;
- goto err_destroy;
- }
-
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
- return 0;
- }
-
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
- return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
-
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
-
- return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
-{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
- new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
- *prev_stats = stats;
-
- return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
-
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
- *prev_xstats = xstats;
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_RED_REPLACE:
- return nfp_abm_red_replace(netdev, alink, opt);
- case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
- return 0;
- case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
- case TC_RED_XSTATS:
- return nfp_abm_red_xstats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
- struct nfp_alink_stats stats;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
- continue;
-
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
- }
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_mq_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
- case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- return 0;
- case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
@@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+ case TC_SETUP_QDISC_GRED:
+ return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
+ case TC_SETUP_BLOCK:
+ return nfp_abm_setup_cls_block(netdev, repr, type_data);
default:
return -EOPNOTSUPP;
}
@@ -573,31 +315,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
+
+ INIT_LIST_HEAD(&alink->dscp_map);
+
+ err = nfp_abm_ctrl_read_params(alink);
+ if (err)
+ goto err_free_alink;
+
+ alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
+ if (!alink->prio_map)
goto err_free_alink;
- }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_priomap;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
- nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
+err_free_priomap:
+ kfree(alink->prio_map);
err_free_alink:
kfree(alink);
return err;
@@ -608,10 +353,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
+ kfree(alink->prio_map);
kfree(alink);
}
+static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ if (nfp_abm_has_prio(alink->abm))
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+ return 0;
+}
+
static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
@@ -664,6 +419,7 @@ static int nfp_abm_init(struct nfp_app *app)
struct nfp_pf *pf = app->pf;
struct nfp_reprs *reprs;
struct nfp_abm *abm;
+ unsigned int i;
int err;
if (!pf->eth_tbl) {
@@ -690,15 +446,35 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+ for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
+ GFP_KERNEL);
+ if (!abm->actions)
+ goto err_free_thresh;
+ for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++)
+ __nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
+
/* We start in legacy mode, make sure advanced queuing is disabled */
err = nfp_abm_ctrl_qm_disable(abm);
if (err)
- goto err_free_abm;
+ goto err_free_act;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_act;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +486,12 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_act:
+ kvfree(abm->actions);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -723,6 +505,9 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->actions);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
@@ -736,6 +521,7 @@ const struct nfp_app_type app_abm = {
.vnic_alloc = nfp_abm_vnic_alloc,
.vnic_free = nfp_abm_vnic_free,
+ .vnic_init = nfp_abm_vnic_init,
.port_get_stats = nfp_abm_port_get_stats,
.port_get_stats_count = nfp_abm_port_get_stats_count,
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d98917..4dcf5881fb4b 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,19 @@
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
struct nfp_net;
@@ -12,21 +24,62 @@ struct nfp_net;
#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
#define NFP_ABM_PORTID_ID GENMASK(7, 0)
+/* The possible actions if thresholds are exceeded */
+enum nfp_abm_q_action {
+ /* mark if ECN capable, otherwise drop */
+ NFP_ABM_ACT_MARK_DROP = 0,
+ /* mark if ECN capable, otherwise goto QM */
+ NFP_ABM_ACT_MARK_QUEUE = 1,
+ NFP_ABM_ACT_DROP = 2,
+ NFP_ABM_ACT_QUEUE = 3,
+ NFP_ABM_ACT_NOQUEUE = 4,
+};
+
/**
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @num_prios: number of supported DSCP priorities
+ * @num_bands: number of supported DSCP priority bands
+ * @action_mask: bitmask of supported actions
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @actions: current FW action configuration
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
+ * @prio_map_len: computed length of FW priority map (in bytes)
+ * @dscp_mask: mask FW will apply on DSCP field
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
+ *
* @q_lvls: queue level control area
* @qm_stats: queue statistics symbol
+ * @q_stats: basic queue statistics (only in per-band case)
*/
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ unsigned int num_prios;
+ unsigned int num_bands;
+ unsigned int action_mask;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ u8 *actions;
+ size_t num_thresholds;
+
+ unsigned int prio_map_len;
+ u8 dscp_mask;
+
enum devlink_eswitch_mode eswitch_mode;
+
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
+ const struct nfp_rtsym *q_stats;
};
/**
@@ -57,16 +110,76 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+ NFP_QDISC_GRED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.num_bands: Number of valid entries in the @red.band table
+ * @red.band: Per-band array of RED instances
+ * @red.band.ecn: ECN marking is enabled (rather than drop)
+ * @red.band.threshold: ECN marking threshold
+ * @red.band.stats: current stats of the RED Qdisc
+ * @red.band.prev_stats: previously reported @red.band.stats
+ * @red.band.xstats: extended stats for RED - current
+ * @red.band.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
+ struct {
+ unsigned int num_bands;
+
+ struct {
+ bool ecn;
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } band[MAX_DPs];
+ } red;
+ };
};
/**
@@ -76,9 +189,17 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @prio_map: current map of priorities
+ * @has_prio: @prio_map is valid
+ *
+ * @def_band: default band to use
+ * @dscp_map: list of DSCP to band mappings
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs:	all qdiscs recorded, keyed by the major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -86,26 +207,65 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ u32 *prio_map;
+ bool has_prio;
+
+ u8 def_band;
+ struct list_head dscp_map;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
+{
+ return abm->num_bands > 1;
+}
+
+static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
+}
+
+static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt);
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt);
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *opt);
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
- u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val);
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act);
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+void nfp_abm_prio_map_update(struct nfp_abm *abm);
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 000000000000..2473fb5f75e5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+ return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
+}
+
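+/* Pull the per-band stats and xstats for one queue of an offloaded RED/GRED
+ * Qdisc from the device.
+ */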
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int i;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
+ &qdisc->red.band[i].stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
+ &qdisc->red.band[i].xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+ }
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+	 * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
+}
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].stats.backlog_pkts = 0;
+ qdisc->red.band[i].stats.backlog_bytes = 0;
+ }
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+	/* Don't touch the backlog; it can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
+}
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = __nfp_abm_stats_init(alink, i, queue,
+ &qdisc->red.band[i].prev_stats,
+ &qdisc->red.band[i].prev_xstats);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ bool good_red, good_gred;
+ unsigned int i;
+
+ good_red = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !alink->has_prio &&
+ !qdisc->children[0];
+ good_gred = qdisc->type == NFP_QDISC_GRED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1;
+ qdisc->offload_mark = good_red || good_gred;
+
+	/* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->abm->num_bands; i++) {
+ enum nfp_abm_q_action act;
+
+ nfp_abm_ctrl_set_q_lvl(alink, i, queue,
+ qdisc->red.band[i].threshold);
+ act = qdisc->red.band[i].ecn ?
+ NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
+ nfp_abm_ctrl_set_q_act(alink, i, queue, act);
+ }
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
+ }
+}
+
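+/* Recompute which Qdiscs should be offloaded: clear all offload marks, walk
+ * the hierarchy from the root MQ to select suitable RED/GRED instances, stop
+ * offload for Qdiscs which lost their mark, and reset any thresholds left
+ * unconfigured.
+ */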
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ for (i = 0; i < abm->num_bands; i++)
+ __bitmap_set(abm->threshold_undef,
+ i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
+ alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
+ }
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
+ }
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
+
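+/* MQ parents don't get graft notifications when a child is destroyed, so drop
+ * any references MQ instances still hold to the Qdisc being freed.
+ */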
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+	/* MQ doesn't notify well on destruction; we need special handling of
+ * MQ's children.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
+ }
+
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ if (children) {
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+ }
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
+
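+/* Find or allocate a tracked Qdisc; returns 1 if an existing instance was
+ * found, 0 if a new one was allocated, or a negative errno on failure.
+ */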
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ struct nfp_qdisc *qdisc;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
+
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+		/* Only a root change matters; other changes are acted upon via
+		 * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
+ }
+}
+
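+/* Record a graft of @child_handle under @handle at slot @id; children we do
+ * not track are remembered with the NFP_QDISC_UNTRACKED marker.
+ */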
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
+ return 0;
+
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
+ return -EINVAL;
+
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
+ else
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
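+/* Fold the delta between current and previously reported counters into the
+ * tc bstats/qstats structures.
+ */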
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
+{
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_gred_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped, we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ if (!stats->xstats[i])
+ continue;
+
+ nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
+ &qdisc->red.band[i].prev_stats,
+ &stats->bstats[i], &stats->qstats[i]);
+ qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
+ &qdisc->red.band[i].prev_xstats,
+ stats->xstats[i]);
+ qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
+ }
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
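+/* Validate GRED parameters against device capabilities before the Qdisc is
+ * considered for offload.
+ */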
+static bool
+nfp_abm_gred_check_params(struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+ unsigned int i;
+
+ if (opt->set.grio_on || opt->set.wred_on) {
+ nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_def != alink->def_band) {
+ nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
+ alink->def_band, opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_cnt != abm->num_bands) {
+ nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
+ abm->num_bands, opt->parent, opt->handle);
+ return false;
+ }
+
+ for (i = 0; i < abm->num_bands; i++) {
+ struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
+
+ if (!band->present)
+ return false;
+ if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_harddrop) {
+ nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min != band->max) {
+ nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min > S32_MAX) {
+ nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
+ band->min, S32_MAX, opt->parent, opt->handle,
+ i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
+ opt->handle, 0, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = opt->set.dp_cnt;
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
+ qdisc->red.band[i].threshold = opt->set.tab[i].min;
+ }
+ }
+
+ if (qdisc->use_cnt)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_GRED_REPLACE:
+ return nfp_abm_gred_replace(netdev, alink, opt);
+ case TC_GRED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_GRED_STATS:
+ return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
+ &qdisc->red.band[0].prev_xstats,
+ opt->xstats);
+ qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
+ return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+	/* If the qdisc offload has stopped, we may need to adjust the backlog
+	 * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
+ &qdisc->red.band[0].prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+
+ if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+ nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
+
+	/* If limit != 0, the child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+		/* A Qdisc just allocated without a limit will use noop_qdisc,
+		 * i.e. a black hole.
+ */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
+
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = 1;
+ qdisc->red.band[0].ecn = opt->set.is_ecn;
+ qdisc->red.band[0].threshold = opt->set.min;
+ }
+
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
+ unsigned int i, j;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
+
+ for (j = 0; j < red->red.num_bands; j++) {
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.band[j].stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.band[j].prev_stats);
+ }
+ }
+
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ return nfp_abm_mq_create(netdev, alink, opt);
+ case TC_MQ_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
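+/* Track the new root of the Qdisc hierarchy and refresh the offload state;
+ * ingress roots are not offloaded.
+ */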
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 6243af0ab025..dccae0319204 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
}
- bpf->bpf_dev = bpf_offload_dev_create();
+ bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
goto err_free_neutral_maps;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 7f591d71ab28..941277936475 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx);
+int nfp_bpf_finalize(struct bpf_verifier_env *env);
+
+extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
struct nfp_app;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ba8ceedcf6a2..f0283854fade 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct nfp_bpf_neutral_map *record;
int err;
- /* Map record paths are entered via ndo, update side is protected. */
- ASSERT_RTNL();
-
/* Reuse path - other offloaded program is already tracking this map. */
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
bool freed = false;
int i;
- ASSERT_RTNL();
-
for (i = 0; i < nfp_prog->map_records_cnt; i++) {
if (--nfp_prog->map_records[i]->count) {
nfp_prog->map_records[i] = NULL;
@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct bpf_prog *prog = bpf->verifier.prog;
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
+ struct nfp_app *app = nn->app;
struct nfp_prog *nfp_prog;
int ret;
@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
goto err_free;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
- bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0;
@@ -219,8 +212,9 @@ err_free:
return ret;
}
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
{
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_instr;
int err;
@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog);
nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog);
-
- return 0;
}
/* Atomic engine requires values to be in big endian, we need to byte swap
@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
switch (bpf->command) {
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_bpf_verifier_prep(app, nn, bpf);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_bpf_translate(nn, bpf->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_bpf_destroy(nn, bpf->offload.prog);
case BPF_OFFLOAD_MAP_ALLOC:
return nfp_bpf_map_alloc(app->priv, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
@@ -489,14 +475,15 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int max_mtu, max_stack, max_prog_len;
+ unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->dp.netdev->mtu) {
- NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
+ if (fw_mtu < pkt_off) {
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
@@ -600,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
return 0;
}
+
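+/* Callbacks invoked by the BPF offload core during verification, translation
+ * and program destruction.
+ */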
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+ .insn_hook = nfp_verify_insn,
+ .finalize = nfp_bpf_finalize,
+ .prepare = nfp_bpf_verifier_prep,
+ .translate = nfp_bpf_translate,
+ .destroy = nfp_bpf_destroy,
+};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 99f977bfd8cc..337bb862ec1d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
@@ -745,7 +745,7 @@ continue_subprog:
goto continue_subprog;
}
-static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
struct nfp_prog *nfp_prog;
@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
-
-const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
- .insn_hook = nfp_verify_insn,
- .finalize = nfp_bpf_finalize,
-};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 244dc261006e..8d54b36afee8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
-#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
@@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
return act_size;
}
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
- enum nfp_flower_tun_type tun_type)
-{
- if (!out_dev->rtnl_link_ops)
- return false;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
- return tun_type == NFP_FL_TUNNEL_VXLAN;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
- return tun_type == NFP_FL_TUNNEL_GENEVE;
-
- return false;
-}
-
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
- /* Only offload if egress ports are on the same device as the
- * ingress port.
- */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
- return -EOPNOTSUPP;
+ if (nfp_netdev_is_nfp_repr(in_dev)) {
+ /* Confirm ingress and egress are on same device. */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ }
+
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
@@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
return 0;
}
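+/* Overlay of the 32-bit IPv4 header word which starts at the TTL field */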
+struct ipv4_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
- struct nfp_fl_set_ip4_addrs *set_ip_addr)
+ struct nfp_fl_set_ip4_addrs *set_ip_addr,
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
+ struct ipv4_ttl_word *ttl_word_mask;
+ struct ipv4_ttl_word *ttl_word;
+ struct iphdr *tos_word_mask;
+ struct iphdr *tos_word;
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
@@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
set_ip_addr->ipv4_dst_mask |= mask;
set_ip_addr->ipv4_dst &= ~mask;
set_ip_addr->ipv4_dst |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
break;
case offsetof(struct iphdr, saddr):
set_ip_addr->ipv4_src_mask |= mask;
set_ip_addr->ipv4_src &= ~mask;
set_ip_addr->ipv4_src |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case offsetof(struct iphdr, ttl):
+ ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+ ttl_word = (struct ipv4_ttl_word *)&exact;
+
+ if (ttl_word_mask->protocol || ttl_word_mask->check)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case round_down(offsetof(struct iphdr, tos), 4):
+ tos_word_mask = (struct iphdr *)&mask;
+ tos_word = (struct iphdr *)&exact;
+
+ if (tos_word_mask->version || tos_word_mask->ihl ||
+ tos_word_mask->tot_len)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
break;
default:
return -EOPNOTSUPP;
}
- set_ip_addr->reserved = cpu_to_be16(0);
- set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
- set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
-
return 0;
}
@@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
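+/* Overlay of the 32-bit IPv6 header word which starts at payload_len and
+ * contains the hop limit.
+ */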
+struct ipv6_hop_limit_word {
+ __be16 payload_len;
+ u8 nexthdr;
+ u8 hop_limit;
+};
+
+static int
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+{
+ struct ipv6_hop_limit_word *fl_hl_mask;
+ struct ipv6_hop_limit_word *fl_hl;
+
+ switch (off) {
+ case offsetof(struct ipv6hdr, payload_len):
+ fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+ fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+ fl_hl_mask->hop_limit;
+ break;
+ case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+ if (mask & ~IPV6_FLOW_LABEL_MASK ||
+ exact & ~IPV6_FLOW_LABEL_MASK)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_label_mask |= mask;
+ ip_hl_fl->ipv6_label &= ~mask;
+ ip_hl_fl->ipv6_label |= exact & mask;
+ break;
+ }
+
+ ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+ ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
- struct nfp_fl_set_ipv6_addr *ip_src)
+ struct nfp_fl_set_ipv6_addr *ip_src,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
__be32 exact, mask;
+ int err = 0;
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
@@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
if (off < offsetof(struct ipv6hdr, saddr)) {
- return -EOPNOTSUPP;
+ err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+ ip_hl_fl);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
}
- return 0;
+ return err;
}
static int
@@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
@@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
u32 offset, cmd;
u8 ip_proto = 0;
+ memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
+ memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
@@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
err = nfp_fl_set_eth(action, idx, offset, &set_eth);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src);
+ &set_ip6_src, &set_ip6_tc_hl_fl);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
err = nfp_fl_set_tport(action, idx, offset, &set_tport,
@@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
}
+ if (set_ip_ttl_tos.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip_ttl_tos);
+ memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip_addr.head.len_lw) {
nfp_action += act_size;
act_size = sizeof(set_ip_addr);
@@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
+ if (set_ip6_tc_hl_fl.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+		/* TC compiles set src and dst IPv6 address as a single action;
* the hardware requires this to be 2 separate actions.
@@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
- struct nfp_repr *repr = netdev_priv(netdev);
- *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 29d673aa5277..15f41cfef9f1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -65,8 +66,10 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
@@ -82,6 +85,8 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
+
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
@@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs {
__be32 ipv4_dst;
};
+struct nfp_fl_set_ip4_ttl_tos {
+ struct nfp_fl_act_head head;
+ u8 ipv4_ttl_mask;
+ u8 ipv4_tos_mask;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be16 reserved;
+};
+
+struct nfp_fl_set_ipv6_tc_hl_fl {
+ struct nfp_fl_act_head head;
+ u8 ipv6_tc_mask;
+ u8 ipv6_hop_limit_mask;
+ __be16 reserved;
+ u8 ipv6_tc;
+ u8 ipv6_hop_limit;
+ __be32 ipv6_label_mask;
+ __be32 ipv6_label;
+};
+
struct nfp_fl_set_ipv6_addr {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
return skb->len - NFP_FLOWER_CMSG_HLEN;
}
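+/* Check whether @netdev is a tunnel device of the given NFP tunnel type */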
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (netif_is_vxlan(netdev))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_geneve(netdev))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+ return false;
+}
+
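+/* Non-repr netdevs we accept offload requests from: OvS internal ports and
+ * supported tunnel devices.
+ */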
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (netif_is_vxlan(netdev))
+ return true;
+ if (netif_is_geneve(netdev))
+ return true;
+
+ return false;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 81dcf5b318ba..5db838f45694 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
-static int
+static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ if (!netif_is_bond_master(master))
+ return;
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
if (!group) {
mutex_unlock(&lag->lock);
- return -ENOENT;
+ nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
+ netdev_name(master));
+ return;
}
group->to_remove = true;
@@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
static int
@@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
return 0;
}
-static int
+static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
struct netdev_notifier_changelowerstate_info *info)
{
@@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
unsigned long *flags;
if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
- return 0;
+ return;
lag_lower_info = info->lower_state_info;
if (!lag_lower_info)
- return 0;
+ return;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
repr = netdev_priv(netdev);
/* Verify that the repr is associated with this app. */
if (repr->app != priv->app)
- return 0;
+ return;
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
@@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
-static int
-nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
- void *ptr)
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct net_device *netdev;
- struct nfp_fl_lag *lag;
+ struct nfp_fl_lag *lag = &priv->nfp_lag;
int err;
- netdev = netdev_notifier_info_to_dev(ptr);
- lag = container_of(nb, struct nfp_fl_lag, lag_nb);
-
switch (event) {
case NETDEV_CHANGEUPPER:
err = nfp_fl_lag_changeupper_event(lag, ptr);
@@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_CHANGELOWERSTATE:
- err = nfp_fl_lag_changels_event(lag, netdev, ptr);
- if (err)
- return NOTIFY_BAD;
+ nfp_fl_lag_changels_event(lag, netdev, ptr);
return NOTIFY_OK;
case NETDEV_UNREGISTER:
- if (netif_is_bond_master(netdev)) {
- err = nfp_fl_lag_schedule_group_delete(lag, netdev);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- }
+ nfp_fl_lag_schedule_group_delete(lag, netdev);
+ return NOTIFY_OK;
}
return NOTIFY_DONE;
@@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
/* 0 is a reserved batch version so increment to first valid value. */
nfp_fl_increment_version(lag);
-
- lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 3a54728d2ea6..5059110a1768 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
- return tc_setup_cb_egdev_register(netdev,
- nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
-}
-
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
kfree(repr->app_priv);
-
- tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
}
static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
return 0;
err_cleanup_metadata:
@@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app)
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
-
- err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
- if (err)
- return err;
}
return nfp_tunnel_config_start(app);
@@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
+ nfp_tunnel_config_stop(app);
+}
+
+static int
+nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr)
+{
struct nfp_flower_priv *app_priv = app->priv;
+ int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
- unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+ }
- nfp_tunnel_config_stop(app);
+ ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
+ return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
const struct nfp_app_type app_flower = {
@@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
- .repr_init = nfp_flower_repr_netdev_init,
.repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
@@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = {
.start = nfp_flower_start,
.stop = nfp_flower_stop,
+ .netdev_event = nfp_flower_netdev_event,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 90045bab95bf..b858bac47621 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
-#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -72,7 +71,6 @@ struct nfp_mtu_conf {
/**
* struct nfp_fl_lag - Flower APP priv data for link aggregation
- * @lag_nb: Notifier to track master/slave events
* @work: Work queue for writing configs to the HW
* @lock: Lock to protect lag_group_list
* @group_list: List of all master/slave groups offloaded
@@ -85,7 +83,6 @@ struct nfp_mtu_conf {
* retransmission
*/
struct nfp_fl_lag {
- struct notifier_block lag_nb;
struct delayed_work work;
struct mutex lock;
struct list_head group_list;
@@ -126,13 +123,13 @@ struct nfp_fl_lag {
* @nfp_neigh_off_lock: Lock for the neighbour address list
* @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
* @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
+ * @indr_block_cb_priv: List of priv data passed to indirect block cbs
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,12 +157,12 @@ struct nfp_flower_priv {
spinlock_t nfp_neigh_off_lock;
struct ida nfp_mac_off_ids;
int nfp_mac_off_count;
- struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
+ struct list_head indr_block_cb_priv;
};
/**
@@ -209,7 +206,6 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
- bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
@@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx);
+ struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
@@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e54fb6034326..cdf75595f627 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
- if (tun_type)
+ if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
- else
+ } else {
+ if (!cmsg_port)
+ return -EOPNOTSUPP;
frame->in_port = cpu_to_be32(cmsg_port);
+ }
return 0;
}
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
}
}
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- struct nfp_repr *netdev_repr;
+ u32 cmsg_port = 0;
int err;
u8 *ext;
u8 *msk;
+ if (nfp_netdev_is_nfp_repr(netdev))
+ cmsg_port = nfp_repr_get_port_id(netdev);
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
+ cmsg_port, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
+ cmsg_port, true, tun_type);
if (err)
return err;
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
- if (nfp_netdev_is_nfp_repr(netdev)) {
- netdev_repr = netdev_priv(netdev);
- nfp_tunnel_write_macs(netdev_repr->app);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
- }
+ nfp_tunnel_write_macs(app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 48729bf171e0..573a4400a26c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
- __be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx)
+ struct net_device *netdev)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
- flower_cmp_arg.host_ctx = host_ctx;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->ingress_dev = netdev;
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
- if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
- (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ if (flow_entry->ingress_dev == cmp_arg->netdev)
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c95423ab64..545d94168874 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
static int
-nfp_flower_xmit_flow(struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype)
{
u32 meta_len, key_len, mask_len, act_len, tot_len;
- struct nfp_repr *priv = netdev_priv(netdev);
struct sk_buff *skb;
unsigned char *msg;
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+ skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
- nfp_ctrl_tx(priv->app->ctrl, skb);
+ nfp_ctrl_tx(app->ctrl, skb);
return 0;
}
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress,
enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
- if (!egress)
- return -EOPNOTSUPP;
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
default:
return -EOPNOTSUPP;
}
- } else if (egress) {
- /* Reject non tunnel matches offloaded to egress repr. */
- return -EOPNOTSUPP;
+
+ /* Ensure the ingress netdev matches the expected tun type. */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -416,7 +413,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
- * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -424,46 +420,35 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
- if (flow_pay) {
- /* Ignore as duplicate if it has been added by different cb. */
- if (flow_pay->ingress_offload && egress)
- return 0;
- else
- return -EOPNOTSUPP;
- }
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
&tun_type);
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer, egress);
+ flow_pay = nfp_flower_allocate_new(key_layer);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
- flow_pay->ingress_dev = egress ? NULL : netdev;
-
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
- tun_type);
+ err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ flow_pay, tun_type);
if (err)
goto err_destroy_flow;
@@ -471,12 +456,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay,
- flow_pay->ingress_dev);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
if (err)
goto err_destroy_flow;
- err = nfp_flower_xmit_flow(netdev, flow_pay,
+ err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_destroy_flow;
@@ -487,7 +471,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- port->tc_offload_cnt++;
+ if (port)
+ port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -509,7 +494,6 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -518,19 +502,19 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
- return egress ? 0 : -ENOENT;
+ return -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -539,13 +523,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
- err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
goto err_free_flow;
err_free_flow:
- port->tc_offload_cnt--;
+ if (port)
+ port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
@@ -561,7 +546,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
 * Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -570,22 +554,16 @@ err_free_flow:
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
u32 ctx_id;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
return -EINVAL;
- if (nfp_flow->ingress_offload && egress)
- return 0;
-
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
@@ -602,35 +580,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower, bool egress)
+ struct tc_cls_flower_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower, egress);
+ return nfp_flower_add_offload(app, netdev, flower);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower, egress);
+ return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, netdev, flower, egress);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct nfp_repr *repr = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, true);
+ return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
}
@@ -647,7 +608,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, false);
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -686,3 +647,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+struct nfp_flower_indr_block_cb_priv {
+ struct net_device *netdev;
+ struct nfp_app *app;
+ struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+ struct tc_cls_flower_offload *flower = type_data;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(priv->app, priv->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct tc_block_offload *f)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->netdev = netdev;
+ cb_priv->app = app;
+ list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+ err = tcf_block_cb_register(f->block,
+ nfp_flower_setup_indr_block_cb,
+ netdev, cb_priv, f->extack);
+ if (err) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_indr_block_cb, netdev);
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (cb_priv) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ int err;
+
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
+ err = __tc_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ netdev);
+ if (err)
+ nfp_flower_cmsg_warn(app,
+ "Indirect block reg failed - %s\n",
+ netdev->name);
+ } else if (event == NETDEV_UNREGISTER) {
+ __tc_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ netdev);
+ }
+
+ return NOTIFY_OK;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8e5bec04d1f9..2d9f26a725c2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
-#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
}
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
- if (!netdev->rtnl_link_ops)
- return false;
- if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
- return true;
- if (netif_is_vxlan(netdev))
- return true;
-
- return false;
-}
-
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_tun_is_netdev_to_offload(netdev))
+ else if (!nfp_fl_is_netdev_to_offload(netdev))
return;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
mutex_unlock(&priv->nfp_mac_off_lock);
}
-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct nfp_flower_priv *app_priv;
- struct net_device *netdev;
- struct nfp_app *app;
-
if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
/* If non-nfp netdev then free its offload index. */
- if (nfp_tun_is_netdev_to_offload(netdev))
+ if (nfp_fl_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
nfp_tun_add_to_mac_offload_list(netdev, app);
/* Force a list write to keep NFP up to date. */
@@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
- struct net_device *netdev;
- int err;
/* Initialise priv data for MAC offloading. */
priv->nfp_mac_off_count = 0;
mutex_init(&priv->nfp_mac_off_lock);
INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
mutex_init(&priv->nfp_mac_index_lock);
INIT_LIST_HEAD(&priv->nfp_mac_index_list);
ida_init(&priv->nfp_mac_off_ids);
@@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
- err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
- if (err)
- goto err_free_mac_ida;
-
- err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
- if (err)
- goto err_unreg_mac_nb;
-
- /* Parse netdevs already registered for MACs that need offloaded. */
- rtnl_lock();
- for_each_netdev(&init_net, netdev)
- nfp_tun_add_to_mac_offload_list(netdev, app);
- rtnl_unlock();
-
- return 0;
-
-err_unreg_mac_nb:
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
-err_free_mac_ida:
- ida_destroy(&priv->nfp_mac_off_ids);
- return err;
+ return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
}
void nfp_tunnel_config_stop(struct nfp_app *app)
@@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
/* Free any memory that may be occupied by MAC list. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 68a0991aac22..4a1b8f79e731 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -136,6 +136,53 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
return old;
}
+static int
+nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ app = container_of(nb, struct nfp_app, netdev_nb);
+
+ if (app->type->netdev_event)
+ return app->type->netdev_event(app, netdev, event, ptr);
+ return NOTIFY_DONE;
+}
+
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ int err;
+
+ app->ctrl = ctrl;
+
+ if (app->type->start) {
+ err = app->type->start(app);
+ if (err)
+ return err;
+ }
+
+ app->netdev_nb.notifier_call = nfp_app_netdev_event;
+ err = register_netdevice_notifier(&app->netdev_nb);
+ if (err)
+ goto err_app_stop;
+
+ return 0;
+
+err_app_stop:
+ if (app->type->stop)
+ app->type->stop(app);
+ return err;
+}
+
+void nfp_app_stop(struct nfp_app *app)
+{
+ unregister_netdevice_notifier(&app->netdev_nb);
+
+ if (app->type->stop)
+ app->type->stop(app);
+}
+
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4d6ecf99b1cc..d578d856a009 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm;
* @port_get_stats_strings: get strings for extra statistics
* @start: start application logic
* @stop: stop application logic
+ * @netdev_event: Netdevice notifier event
* @ctrl_msg_rx: control message handler
* @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
@@ -122,6 +123,9 @@ struct nfp_app_type {
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
+ int (*netdev_event)(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr);
+
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
unsigned int len);
@@ -151,6 +155,7 @@ struct nfp_app_type {
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
* @ctrl_mtu: MTU to set on the control vNIC (set in .init())
+ * @netdev_nb: Netdevice notifier block
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -163,6 +168,9 @@ struct nfp_app {
const struct nfp_app_type *type;
unsigned int ctrl_mtu;
+
+ struct notifier_block netdev_nb;
+
void *priv;
};
@@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
return app->type->repr_change_mtu(app, netdev, new_mtu);
}
-static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
-{
- app->ctrl = ctrl;
- if (!app->type->start)
- return 0;
- return app->type->start(app);
-}
-
-static inline void nfp_app_stop(struct nfp_app *app)
-{
- if (!app->type->stop)
- return;
- app->type->stop(app);
-}
-
static inline const char *nfp_app_name(struct nfp_app *app)
{
if (!app)
@@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl);
+void nfp_app_stop(struct nfp_app *app);
/* Callbacks shared between apps */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6f0c37d09256..bb3dbd74583b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -851,7 +851,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -868,6 +868,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcfdec34..9aa6265bf4de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -279,7 +279,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
@@ -890,8 +890,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&r_vec->tx_sync);
}
- netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
skb_tx_timestamp(skb);
tx_ring->wr_p += nr_frags + 1;
@@ -899,7 +897,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
@@ -3560,6 +3558,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3569,12 @@ void nfp_net_info(struct nfp_net *nn)
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
+ int err;
if (needs_netdev) {
struct net_device *netdev;
@@ -3594,6 +3594,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
}
nn->dp.dev = &pdev->dev;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3617,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ goto err_free_nn;
+
return nn;
+
+err_free_nn:
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
+ return ERR_PTR(err);
}
/**
@@ -3889,11 +3902,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
- &nn->tlv_caps);
- if (err)
- return err;
-
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d7c8518ac952..ccec69944de5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -397,6 +397,8 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+
/**
* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 1e7d20468a34..08f5fdbd8e41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+ nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
- nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d2c1e9ea5668..1145849ca7ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->dp.ctrl_bar = ctrl_bar;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 25382f8fbb70..89d17399fb5a 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -280,7 +280,7 @@
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
/*
- * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
* register definitions
*/
#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
@@ -291,7 +291,7 @@
#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
/*
- * rxfliterctrl register definitions
+ * rxfilterctrl register definitions
*/
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
@@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev)
phy_set_max_speed(phydev, SPEED_100);
- phydev->advertising = phydev->supported;
-
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 5c221ebaa7b3..7e120b58ac58 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12655,6 +12655,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
@@ -12814,6 +12815,11 @@ struct public_drv_mb {
union drv_union_data union_data;
};
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index fff7f04d4525..4b3e68217502 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1939,21 +1939,30 @@ exit:
* 0B | 0x3 [command index] |
* 4B | b'0: check_response? | b'1-31 reserved |
* 8B | File-type | reserved |
+ * 12B | Image length in bytes |
* \----------------------------------------------------------------------/
* Start a new file of the provided type
*/
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
const u8 **data, bool *check_resp)
{
+ u32 file_type, file_size = 0;
int rc;
*data += 4;
*check_resp = !!(**data & BIT(0));
*data += 4;
+ file_type = **data;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
- "About to start a new file of type %02x\n", **data);
- rc = qed_mcp_nvm_put_file_begin(cdev, **data);
+ "About to start a new file of type %02x\n", file_type);
+ if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
+ *data += 4;
+ file_size = *((u32 *)(*data));
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
+ (u8 *)(&file_size), 4);
*data += 4;
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a96364df4320..e7f18e34ff0d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1619,7 +1619,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_sp_pf_update_stag(p_hwfn);
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
@@ -1641,7 +1641,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
OEM_CFG_CHANNEL_TYPE_OFFSET;
if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
- DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Incorrect UFP Channel type %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
if (val == OEM_CFG_SCHED_TYPE_ETS) {
@@ -1650,7 +1652,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
} else {
p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
@@ -1665,13 +1669,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
} else {
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown Host priority control %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
DP_NOTICE(p_hwfn,
- "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
- p_hwfn->ufp_info.mode,
- p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+ "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
@@ -2739,24 +2745,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
return 0;
}
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- u32 resp, param;
- int rc;
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
- &resp, &param);
- cdev->mcp_nvm_resp = resp;
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-}
-
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
@@ -2770,6 +2758,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
return -EBUSY;
switch (cmd) {
+ case QED_PUT_FILE_BEGIN:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ break;
case QED_PUT_FILE_DATA:
nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
break;
@@ -2782,10 +2773,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
goto out;
}
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
while (buf_idx < len) {
- buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
- nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
- addr) + buf_idx;
+ if (cmd == QED_PUT_FILE_BEGIN)
+ nvm_offset = addr;
+ else
+ nvm_offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+ buf_idx;
rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
&resp, &param, buf_size,
(u32 *)&p_buf[buf_idx]);
@@ -2810,7 +2805,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
usleep_range(1000, 2000);
- buf_idx += buf_size;
+ /* For MBI upgrade, MFW response includes the next buffer offset
+ * to be delivered to MFW.
+ */
+ if (param && cmd == QED_PUT_FILE_DATA) {
+ buf_idx = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ } else {
+ buf_idx += buf_size;
+ buf_size = min_t(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ }
}
cdev->mcp_nvm_resp = resp;
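With this change the MFW can steer an MBI transfer: when the QED_PUT_FILE_DATA command returns a non-zero response param, the next offset and chunk size are packed into it using the FW_MB_PARAM_NVM_PUT_FILE_REQ_* masks added to qed_hsi.h above. A standalone sketch of that unpacking (plain C, mirroring what QED_MFW_GET_FIELD does; the param value in main() is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same layout as the FW_MB_PARAM_NVM_PUT_FILE_REQ_* defines above. */
#define NVM_PUT_FILE_REQ_OFFSET_MASK	0x00ffffffu
#define NVM_PUT_FILE_REQ_OFFSET_SHIFT	0
#define NVM_PUT_FILE_REQ_SIZE_MASK	0xff000000u
#define NVM_PUT_FILE_REQ_SIZE_SHIFT	24

/* Minimal equivalent of QED_MFW_GET_FIELD() for these two fields. */
static uint32_t get_field(uint32_t param, uint32_t mask, uint32_t shift)
{
	return (param & mask) >> shift;
}

int main(void)
{
	/* Example MFW response: resume at offset 0x1234, next chunk 32 bytes. */
	uint32_t param = (32u << NVM_PUT_FILE_REQ_SIZE_SHIFT) | 0x1234u;

	printf("next offset: 0x%x, next size: %u\n",
	       (unsigned)get_field(param, NVM_PUT_FILE_REQ_OFFSET_MASK,
				   NVM_PUT_FILE_REQ_OFFSET_SHIFT),
	       (unsigned)get_field(param, NVM_PUT_FILE_REQ_SIZE_MASK,
				   NVM_PUT_FILE_REQ_SIZE_SHIFT));
	return 0;
}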
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 1adfe52b3905..eddf67798d6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -543,16 +543,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Put file begin
- *
- * @param cdev
- * @param addr - nvm offset
- *
- * @return int - 0 - operation was successful.
- */
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr);
-
-/**
* @brief Check latest response
*
* @param cdev
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index de98a974673b..8c0fe59eaad5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,13 @@ struct qede_ptp;
#define QEDE_RFS_MAX_FLTR 256
+enum qede_flags_bit {
+ QEDE_FLAGS_IS_VF = 0,
+ QEDE_FLAGS_LINK_REQUESTED,
+ QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ QEDE_FLAGS_TX_TIMESTAMPING_EN
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -177,10 +184,7 @@ struct qede_dev {
u8 dp_level;
unsigned long flags;
-#define QEDE_FLAG_IS_VF BIT(0)
-#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
-#define QEDE_TX_TIMESTAMPING_EN BIT(1)
-#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
+#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
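The qede.h change above switches from precomputed BIT() masks to bit numbers consumed by the kernel's atomic set_bit()/clear_bit()/test_bit() helpers. A minimal user-space sketch of the same idea, using non-atomic stand-ins for those helpers (the real kernel ones are atomic):

#include <stdio.h>

/* Bit *numbers*, not masks, as in the new enum qede_flags_bit. */
enum flags_bit { FLAG_IS_VF = 0, FLAG_LINK_REQUESTED, FLAG_PTP_TX, FLAG_TX_TS_EN };

/* Non-atomic stand-ins for set_bit()/test_bit(); kernel versions are atomic. */
static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit_ul(int nr, const unsigned long *addr)
{
	return !!(*addr & (1UL << nr));
}

int main(void)
{
	unsigned long flags = 0;

	set_bit_ul(FLAG_LINK_REQUESTED, &flags);
	printf("link requested: %d, is VF: %d\n",
	       test_bit_ul(FLAG_LINK_REQUESTED, &flags),
	       test_bit_ul(FLAG_IS_VF, &flags));
	return 0;
}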
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2eaa0c0..efbb4f3f0f4b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1086,7 +1086,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
}
if (is_vf)
- edev->flags |= QEDE_FLAG_IS_VF;
+ set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
qede_init_ndev(edev);
@@ -2057,6 +2057,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
if (!is_locked)
__qede_lock(edev);
+ clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2163,6 +2165,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
+ set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2258,8 +2262,8 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
{
struct qede_dev *edev = dev;
- if (!netif_running(edev->ndev)) {
- DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 013ff567283c..5f3f42a25361 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -223,12 +223,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
- edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+ set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_ON;
break;
case HWTSTAMP_TX_OFF:
- edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
@@ -518,7 +518,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
return;
- if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+ if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (unlikely(ptp->tx_skb)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d42ba2293d8c..16d0479f6891 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2993,10 +2993,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
int i;
- struct cmd_desc_type0 *tx_desc_info;
for (i = 0; i < tx_ring->num_desc; i++) {
- tx_desc_info = &tx_ring->desc_head[i];
pr_info("TX Desc: %d\n", i);
print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
&tx_ring->desc_head[i],
@@ -4008,19 +4006,12 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw_rings = 0;
char buf[8];
- int cur_rings;
- if (queue_type == QLCNIC_RX_QUEUE) {
- max_hw_rings = adapter->max_sds_rings;
- cur_rings = adapter->drv_sds_rings;
+ if (queue_type == QLCNIC_RX_QUEUE)
strcpy(buf, "SDS");
- } else if (queue_type == QLCNIC_TX_QUEUE) {
- max_hw_rings = adapter->max_tx_rings;
- cur_rings = adapter->drv_tx_rings;
+ else
strcpy(buf, "Tx");
- }
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index a9f1bc013364..1450f386bc65 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"Transmit ring full",
"SPI errors",
"Write verify errors",
+ "Buffer available errors",
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index d5310504f436..97f92953bdb9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca)
qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
+ if (available > QCASPI_HW_BUF_LEN) {
+	/* This could only happen due to interference on the SPI line.
+ * So retry later ...
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ }
+
while (qca->txr.skb[qca->txr.head]) {
pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
@@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available == 0) {
+ if (available > QCASPI_HW_BUF_LEN) {
+	/* This could only happen due to interference on the SPI line.
+ * So retry later ...
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ } else if (available == 0) {
netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
return -1;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 2d2c49726492..eb9af45fcc5e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -74,6 +74,7 @@ struct qcaspi_stats {
u64 ring_full;
u64 spi_err;
u64 write_verify_failed;
+ u64 buf_avail_err;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5f4e447c5dce..b8bbee645f51 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -301,10 +301,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct rmnet_port *port;
u16 mux_id;
+ if (!dev)
+ return -ENODEV;
+
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 3ee8ae9b6838..8990307e5475 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -23,9 +23,6 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
- u16 ip_family;
- u16 fc_seq;
- u32 qos_id;
u8 mux_id;
int r;
@@ -45,10 +42,6 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
vnd = ep->egress_dev;
- ip_family = cmd->flow_control.ip_family;
- fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
- qos_id = ntohl(cmd->flow_control.qos_id);
-
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1fd01688d37b..e26c48bd54a2 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -56,13 +56,6 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_SLOTS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
-
-/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-#define TX_FRAGS_READY_FOR(tp,nr_frags) \
- (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
@@ -212,24 +205,24 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
+ { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8167), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(NCUBE, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8169), RTL_CFG_0 },
+ { PCI_VENDOR_ID_DLINK, 0x4300,
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
+ { PCI_VDEVICE(DLINK, 0x4300), RTL_CFG_0 },
+ { PCI_VDEVICE(DLINK, 0x4302), RTL_CFG_0 },
+ { PCI_VDEVICE(AT, 0xc107), RTL_CFG_0 },
+ { PCI_VDEVICE(USR, 0x0116), RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{ 0x0001, 0x8168,
PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
- {0,},
+ {}
};
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
@@ -603,7 +596,6 @@ struct RxDesc {
struct ring_info {
struct sk_buff *skb;
u32 len;
- u8 __pad[sizeof(void *) - sizeof(u32)];
};
struct rtl8169_counters {
@@ -661,7 +653,7 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
- u16 event_slow;
+ u16 irq_mask;
const struct rtl_coalesce_info *coalesce_info;
struct clk *clk;
@@ -1102,23 +1094,6 @@ static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
return rtl_eri_read(tp, reg, ERIAR_OOB);
}
-static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- return r8168dp_ocp_read(tp, mask, reg);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- return r8168ep_ocp_read(tp, mask, reg);
- default:
- BUG();
- return ~0;
- }
-}
-
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
@@ -1134,30 +1109,11 @@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
data, ERIAR_OOB);
}
-static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- r8168dp_ocp_write(tp, mask, reg, data);
- break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- r8168ep_ocp_write(tp, mask, reg, data);
- break;
- default:
- BUG();
- break;
- }
-}
-
-static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
- ocp_write(tp, 0x1, 0x30, 0x00000001);
+ r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -1169,18 +1125,18 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-DECLARE_RTL_COND(rtl_ocp_read_cond)
+DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
{
u16 reg;
reg = rtl8168_get_ocp_reg(tp);
- return ocp_read(tp, 0x0f, reg) & 0x00000800;
+ return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
}
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
- return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1198,14 +1154,15 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+ rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1230,15 +1187,16 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1265,12 +1223,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(ocp_read(tp, 0x0f, reg) & 0x00008000);
+ return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+ return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1325,27 +1283,20 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
RTL_W16(tp, IntrStatus, bits);
- mmiowb();
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
- mmiowb();
-}
-
-static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
-{
- RTL_W16(tp, IntrMask, bits);
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
-static void rtl_irq_enable_all(struct rtl8169_private *tp)
+static void rtl_irq_enable(struct rtl8169_private *tp)
{
- rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+ RTL_W16(tp, IntrMask, tp->irq_mask);
}
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
@@ -2051,8 +2002,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- u8 default_version)
+static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2066,120 +2016,107 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
* (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
static const struct rtl_mac_info {
- u32 mask;
- u32 val;
- int mac_version;
+ u16 mask;
+ u16 val;
+ u16 mac_version;
} mac_info[] = {
/* 8168EP family. */
- { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 },
- { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 },
- { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 },
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
+ { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
/* 8168H family. */
- { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
- { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
+ { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
/* 8168G family. */
- { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
- { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
- { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
- { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
+ { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
- { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
- { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
- { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
- { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
- { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
- { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
/* 8168D family. */
- { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
- { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
- { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
- { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
+ { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
- { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
- { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
- { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
- { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
- { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
- { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
- { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
- { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
- { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+ { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
+ { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
- { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
- { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
- { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
- { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
- { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
- { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
- { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
- { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+ { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
/* FIXME: where did these entries come from ? -- FR */
- { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
- { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
+ { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 },
+ { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 },
/* 8110 family. */
- { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
- { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
- { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
- { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
- { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
- { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
+ { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 },
/* Catch-all */
- { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u32 reg;
+ u16 reg = RTL_R32(tp, TxConfig) >> 20;
- reg = RTL_R32(tp, TxConfig);
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_notice(tp_to_dev(tp),
- "unknown MAC, using family default\n");
- tp->mac_version = default_version;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_42 :
- RTL_GIGA_MAC_VER_43;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_45 :
- RTL_GIGA_MAC_VER_47;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_46 :
- RTL_GIGA_MAC_VER_48;
+ dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
+ } else if (!tp->supports_gmii) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_42)
+ tp->mac_version = RTL_GIGA_MAC_VER_43;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
+ tp->mac_version = RTL_GIGA_MAC_VER_47;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
+ tp->mac_version = RTL_GIGA_MAC_VER_48;
}
}
-static void rtl8169_print_mac_version(struct rtl8169_private *tp)
-{
- netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
-}
-
struct phy_reg {
u16 reg;
u16 val;
@@ -3902,8 +3839,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_print_mac_version(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01:
break;
@@ -4643,7 +4578,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
- rtl_irq_enable_all(tp);
+ rtl_irq_enable(tp);
}
static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5394,8 +5329,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->event_slow |= RxFIFOOver | PCSTimeout;
- tp->event_slow &= ~RxOverflow;
+ tp->irq_mask |= RxFIFOOver;
+ tp->irq_mask &= ~RxOverflow;
}
switch (tp->mac_version) {
@@ -5632,7 +5567,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
- tp->event_slow &= ~RxFIFOOver;
+ tp->irq_mask &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16)
@@ -5888,6 +5823,16 @@ static void rtl8169_tx_timeout(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
+static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+{
+ u32 status = opts0 | len;
+
+ if (entry == NUM_TX_DESC - 1)
+ status |= RingEnd;
+
+ return cpu_to_le32(status);
+}
+
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
u32 *opts)
{
@@ -5900,7 +5845,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
- u32 status, len;
+ u32 len;
void *addr;
entry = (entry + 1) % NUM_TX_DESC;
@@ -5916,11 +5861,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
goto err_out;
}
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len |
- (RingEnd * !((entry + 1) % NUM_TX_DESC));
-
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
txd->opts2 = cpu_to_le32(opts[1]);
txd->addr = cpu_to_le64(mapping);
@@ -6108,6 +6049,15 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
+ unsigned int nr_frags)
+{
+ unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+
+ /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+ return slots_avail > nr_frags;
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -6116,11 +6066,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct TxDesc *txd = tp->TxDescArray + entry;
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
- u32 status, len;
- u32 opts[2];
+ u32 opts[2], len;
+ bool stop_queue;
int frags;
- if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -6159,32 +6109,26 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
wmb();
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue))
+ netif_stop_queue(dev);
- mmiowb();
+ if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
+ RTL_W8(tp, TxPoll, NPQ);
- if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
- /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
- * not miss a ring update when it notices a stopped queue.
- */
- smp_wmb();
- netif_stop_queue(dev);
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -6193,7 +6137,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't.
*/
smp_mb();
- if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
+ if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
@@ -6257,7 +6201,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ int budget)
{
unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
@@ -6285,7 +6230,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
if (status & LastFrag) {
pkts_compl++;
bytes_compl += tx_skb->skb->len;
- dev_consume_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -6310,7 +6255,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
*/
smp_mb();
if (netif_queue_stopped(dev) &&
- TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
@@ -6461,7 +6406,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
struct rtl8169_private *tp = dev_instance;
u16 status = rtl_get_events(tp);
- if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ if (status == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6528,13 +6473,11 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
work_done = rtl_rx(dev, tp, (u32) budget);
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
-
- rtl_irq_enable_all(tp);
- mmiowb();
+ rtl_irq_enable(tp);
}
return work_done;
@@ -6584,7 +6527,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
phy_set_max_speed(phydev, SPEED_100);
/* Ensure to advertise everything, incl. pause */
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
phy_attached_info(phydev);
@@ -6824,8 +6767,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
static int rtl8169_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
@@ -6855,8 +6797,7 @@ static void __rtl8169_resume(struct net_device *dev)
static int rtl8169_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
clk_prepare_enable(tp->clk);
@@ -6869,8 +6810,7 @@ static int rtl8169_resume(struct device *device)
static int rtl8169_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
if (!tp->TxDescArray)
@@ -6891,8 +6831,7 @@ static int rtl8169_runtime_suspend(struct device *device)
static int rtl8169_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl_rar_set(tp, dev->dev_addr);
@@ -6910,8 +6849,7 @@ static int rtl8169_runtime_resume(struct device *device)
static int rtl8169_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
if (!netif_running(dev) || !netif_carrier_ok(dev))
pm_schedule_suspend(device, 10000);
@@ -7023,31 +6961,26 @@ static const struct net_device_ops rtl_netdev_ops = {
static const struct rtl_cfg_info {
void (*hw_start)(struct rtl8169_private *tp);
- u16 event_slow;
+ u16 irq_mask;
unsigned int has_gmii:1;
const struct rtl_coalesce_info *coalesce_info;
- u8 default_ver;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .irq_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8169,
- .default_ver = RTL_GIGA_MAC_VER_01,
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
- .event_slow = SYSErr | LinkChg | RxOverflow,
+ .irq_mask = LinkChg | RxOverflow,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_11,
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
- PCSTimeout,
+ .irq_mask = LinkChg | RxOverflow | RxFIFOOver,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_13,
}
};
@@ -7309,11 +7242,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
- if (!pci_is_pcie(pdev))
- dev_info(&pdev->dev, "not PCI Express\n");
-
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, cfg->default_ver);
+ rtl8169_get_mac_version(tp);
+ if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ return -ENODEV;
if (rtl_tbi_enabled(tp)) {
dev_err(&pdev->dev, "TBI fiber mode not supported\n");
@@ -7351,8 +7283,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_mdio_ops(tp);
rtl_init_jumbo_ops(tp);
- rtl8169_print_mac_version(tp);
-
chipset = tp->mac_version;
rc = rtl_alloc_irq(tp);
@@ -7426,7 +7356,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
- tp->event_slow = cfg->event_slow;
+ tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
tp->coalesce_info = cfg->coalesce_info;
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
@@ -7450,9 +7380,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_mdio_unregister;
- netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
+ netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
- (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
+ (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
if (jumbo_max > JUMBO_1K)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1c6e4df94f01..ac9195add811 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1032,7 +1032,6 @@ struct ravb_private {
phy_interface_t phy_interface;
int msg_enable;
int speed;
- int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
int rx_irqs[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index defed0d0c51d..ffc1ada4e6da 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_duplex(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
-
- ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
-}
-
static void ravb_set_rate(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -406,13 +399,11 @@ error:
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
- struct ravb_private *priv = netdev_priv(ndev);
-
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
- ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+ ravb_write(ndev, ECMR_ZPF | ECMR_DM |
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
@@ -995,12 +986,6 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
- if (phydev->duplex != priv->duplex) {
- new_state = true;
- priv->duplex = phydev->duplex;
- ravb_set_duplex(ndev);
- }
-
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1015,7 +1000,6 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1045,7 +1029,6 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1088,6 +1071,10 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index beb06628f22d..6213827e3956 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1632,9 +1632,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
@@ -2145,8 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev,
static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_attr_get = rocker_port_attr_get,
.switchdev_port_attr_set = rocker_port_attr_set,
- .switchdev_port_obj_add = rocker_port_obj_add,
- .switchdev_port_obj_del = rocker_port_obj_del,
};
struct rocker_fib_event_work {
@@ -2812,12 +2807,54 @@ static int rocker_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+static int
+rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = rocker_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = rocker_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int rocker_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return rocker_switchdev_port_obj_event(event, dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block rocker_switchdev_notifier = {
.notifier_call = rocker_switchdev_event,
};
+static struct notifier_block rocker_switchdev_blocking_notifier = {
+ .notifier_call = rocker_switchdev_blocking_event,
+};
+
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct notifier_block *nb;
struct rocker *rocker;
int err;
@@ -2933,6 +2970,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_register_switchdev_notifier;
}
+ nb = &rocker_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
@@ -2940,6 +2984,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
@@ -2971,6 +3017,10 @@ err_pci_enable_device:
static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ struct notifier_block *nb;
+
+ nb = &rocker_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7eeac3d6cfe8..b6a50058bb8d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+ /* MUM and SUC firmware share the same partition type */
+ { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
};
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
@@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
part->common.mtd.flags = MTD_CAP_NORFLASH;
part->common.mtd.size = size;
part->common.mtd.erasesize = erase_size;
+ /* sfc_status is read-only */
+ if (!erase_size)
+ part->common.mtd.flags |= MTD_NO_ERASE;
return 0;
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c3ad564ac4c0..22eb059086f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
goto err;
- /* Update BQL */
- netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d9d0d03e4ce7..bba9733b5119 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -234,6 +234,9 @@
#define DESC_NUM 256
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
@@ -571,34 +574,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
/************* NETDEV_OPS FOLLOW *************/
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
- struct netsec_desc *desc)
-{
- struct sk_buff *skb;
-
- if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
- skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
- } else {
- desc->len = L1_CACHE_ALIGN(desc->len);
- skb = netdev_alloc_skb(priv->ndev, desc->len);
- }
- if (!skb)
- return NULL;
-
- desc->addr = skb->data;
- desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, desc->dma_addr)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
- return skb;
-}
static void netsec_set_rx_de(struct netsec_priv *priv,
struct netsec_desc_ring *dring, u16 idx,
- const struct netsec_desc *desc,
- struct sk_buff *skb)
+ const struct netsec_desc *desc)
{
struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,59 +596,6 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
dring->desc[idx].dma_addr = desc->dma_addr;
dring->desc[idx].addr = desc->addr;
dring->desc[idx].len = desc->len;
- dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
- struct netsec_desc_ring *dring,
- u16 idx,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc, u16 *len)
-{
- struct netsec_de de = {};
-
- memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
- *len = de.buf_len_info >> 16;
-
- rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
- rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
- NETSEC_RX_PKT_ERR_MASK;
- *desc = dring->desc[idx];
- return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc,
- u16 *len)
-{
- struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct sk_buff *tmp_skb, *skb = NULL;
- struct netsec_desc td;
- int tail;
-
- *rxpi = (struct netsec_rx_pkt_info){};
-
- td.len = priv->ndev->mtu + 22;
-
- tmp_skb = netsec_alloc_skb(priv, &td);
-
- tail = dring->tail;
-
- if (!tmp_skb) {
- netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
- dring->desc[tail].skb);
- } else {
- skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
- netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
- }
-
- /* move tail ahead */
- dring->tail = (dring->tail + 1) % DESC_NUM;
-
- return skb;
}
static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
@@ -736,19 +662,65 @@ static int netsec_process_tx(struct netsec_priv *priv, int budget)
return done;
}
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+ dma_addr_t *dma_handle, u16 *desc_len)
+{
+ size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size_t payload_len = NETSEC_RX_BUF_SZ;
+ dma_addr_t mapping;
+ void *buf;
+
+ total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+ buf = napi_alloc_frag(total_len);
+ if (!buf)
+ return NULL;
+
+ mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping)))
+ goto err_out;
+
+ *dma_handle = mapping;
+ *desc_len = payload_len;
+
+ return buf;
+
+err_out:
+ skb_free_frag(buf);
+ return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ u16 idx = from;
+
+ while (num) {
+ netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+ idx++;
+ if (idx >= DESC_NUM)
+ idx = 0;
+ num--;
+ }
+}
+
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0;
- struct netsec_desc desc;
struct sk_buff *skb;
- u16 len;
+ int done = 0;
while (done < budget) {
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+ struct netsec_desc *desc = &dring->desc[idx];
+ u16 pkt_len, desc_len;
+ dma_addr_t dma_handle;
+ void *buf_addr;
+ u32 truesize;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
@@ -762,18 +734,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
*/
dma_rmb();
done++;
- skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
- if (unlikely(!skb) || rx_info.err_flag) {
+
+ pkt_len = de->buf_len_info >> 16;
+ rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ if (rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
- "%s: rx fail err(%d)\n",
- __func__, rx_info.err_code);
+ "%s: rx fail err(%d)\n", __func__,
+ rx_info.err_code);
ndev->stats.rx_dropped++;
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+ /* reuse buffer page frag */
+ netsec_rx_fill(priv, idx, 1);
continue;
}
+ rx_info.rx_cksum_result =
+ (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
- DMA_FROM_DEVICE);
- skb_put(skb, len);
+ /* allocate a fresh buffer and map it to the hardware.
+ * This will eventually replace the old buffer in the hardware
+ */
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ if (unlikely(!buf_addr))
+ break;
+
+ dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(desc->addr);
+
+ truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(desc->addr, truesize);
+ if (unlikely(!skb)) {
+ /* free the newly allocated buffer, we are not going to
+ * use it
+ */
+ dma_unmap_single(priv->dev, dma_handle, desc_len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf_addr);
+ netif_err(priv, drv, priv->ndev,
+ "rx failed to build skb\n");
+ break;
+ }
+ dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+ /* Update the descriptor with the new buffer we allocated */
+ desc->len = desc_len;
+ desc->dma_addr = dma_handle;
+ desc->addr = buf_addr;
+
+ skb_reserve(skb, NETSEC_SKB_PAD);
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
@@ -782,8 +795,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += len;
+ ndev->stats.rx_bytes += pkt_len;
}
+
+ netsec_rx_fill(priv, idx, 1);
+ dring->tail = (dring->tail + 1) % DESC_NUM;
}
return done;
@@ -946,7 +962,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
DMA_TO_DEVICE);
- dev_kfree_skb(desc->skb);
+ if (id == NETSEC_RING_RX)
+ skb_free_frag(desc->addr);
+ else if (id == NETSEC_RING_TX)
+ dev_kfree_skb(desc->skb);
}
memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -977,47 +996,50 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int ret = 0;
dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
- if (!dring->vaddr) {
- ret = -ENOMEM;
+ if (!dring->vaddr)
goto err;
- }
dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
- if (!dring->desc) {
- ret = -ENOMEM;
+ if (!dring->desc)
goto err;
- }
return 0;
err:
netsec_free_dring(priv, id);
- return ret;
+ return -ENOMEM;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct netsec_desc desc;
- struct sk_buff *skb;
- int n;
+ int i;
- desc.len = priv->ndev->mtu + 22;
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_desc *desc = &dring->desc[i];
+ dma_addr_t dma_handle;
+ void *buf;
+ u16 len;
- for (n = 0; n < DESC_NUM; n++) {
- skb = netsec_alloc_skb(priv, &desc);
- if (!skb) {
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- return -ENOMEM;
+ goto err_out;
}
- netsec_set_rx_de(priv, dring, n, &desc, skb);
+ desc->dma_addr = dma_handle;
+ desc->addr = buf;
+ desc->len = len;
}
+ netsec_rx_fill(priv, 0, DESC_NUM);
+
return 0;
+
+err_out:
+ return -ENOMEM;
}
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1399,8 @@ static int netsec_netdev_init(struct net_device *ndev)
int ret;
u16 data;
+ BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6732f5cbde08..9e7391faa1dc 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1117,7 +1117,7 @@ static void ave_phy_adjust_link(struct net_device *ndev)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (cap & FLOW_CTRL_TX)
txcr |= AVE_TXCR_FLOCTR;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864fa809..d1f61c25d82b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return;
} else {
- if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
- !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ netdev->phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ netdev->phydev->supported))
return;
}
@@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return -EOPNOTSUPP;
} else {
- if (!(phy->supported & SUPPORTED_Pause) ||
- !(phy->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phy->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phy->supported))
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 500f7ed8c58c..e4aa030f1726 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN)
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
bool mqprio_hw;
int fifo_bw[CPSW_TC_NUM];
int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid);
+
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->data.dual_emac) {
- struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
- ALE_VLAN, slave->port_vlan, 0);
- return;
- }
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
- __dev_mc_unsync(ndev, NULL);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+/**
+ * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
+ * it if it's not deleted
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret;
+
+ if (vid < 0) {
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ else
+ vid = 0;
+ }
+
+ mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
- cpsw_add_mcast(priv, addr);
return 0;
}
-static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int vid, flags;
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
- if (cpsw->data.dual_emac) {
- vid = cpsw->slaves[priv->emac_port].port_vlan;
- flags = ALE_VLAN;
- } else {
- vid = 0;
- flags = 0;
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
}
- cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
return 0;
}
@@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* Restore allmulti on vlans if necessary */
cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv;
if (cpsw->data.dual_emac) {
port = CPDMA_RX_SOURCE_PORT(status);
@@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_put(skb, len);
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
- cpts_rx_timestamp(cpsw->cpts, skb);
+ priv = netdev_priv(ndev);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, tx_prio_map, tx_prio_rg);
}
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
/* restore MQPRIO offload */
for_each_slave(priv, cpsw_mqprio_resume, priv);
@@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
q_idx = skb_get_queue_mapping(skb);
@@ -2047,13 +2175,13 @@ fail:
#if IS_ENABLED(CONFIG_TI_CPTS)
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
+ struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpts_is_tx_enabled(cpsw->cpts) &&
- !cpts_is_rx_enabled(cpsw->cpts)) {
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
struct cpsw_common *cpsw = priv->cpsw;
- struct cpts *cpts = cpsw->cpts;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
@@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ priv->rx_ts_enabled = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(cpsw);
+ cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpts *cpts = cpsw->cpts;
+ struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
@@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c686bf..054f78295d1d 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev,
- "expiring tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
- if (!skb_queue_empty(&cpts->txq))
- delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
u64 ns;
struct skb_shared_hwtstamps *ssh;
- if (!cpts->rx_enable)
- return;
ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
if (!ns)
return;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73faf0f38..d2c7decd59b6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
void cpts_release(struct cpts *cpts);
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
- cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
- cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return !!cpts->tx_enable;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
{
}
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb6597e..20d81e0b1c29 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
int cpts_registered;
struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
};
struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
- !cpts_is_tx_enabled(gbe_dev->cpts))
+ !gbe_dev->tx_ts_enabled)
return 0;
/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
- cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
p_info->rxtstamp_complete = true;
return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
u32 ts_en, seq_id, ctl;
- if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
- !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
return;
}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
(slave->ts_ctl.uni ? TS_UNI_EN :
slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
- if (cpts_is_tx_enabled(gbe_dev->cpts))
+ if (gbe_dev->tx_ts_enabled)
ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
- if (cpts_is_rx_enabled(gbe_dev->cpts))
+ if (gbe_dev->rx_ts_enabled)
ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
- cpts_tx_enable(cpts, 0);
+ gbe_dev->tx_ts_enabled = 0;
break;
case HWTSTAMP_TX_ON:
- cpts_tx_enable(cpts, 1);
+ gbe_dev->tx_ts_enabled = 1;
break;
default:
return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6a71c2c0f17d..c50a9772f4af 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev)
static int tc_mii_probe(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct tc35815_local *lp = netdev_priv(dev);
struct phy_device *phydev;
- u32 dropmask;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
@@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
phy_set_max_speed(phydev, SPEED_100);
- dropmask = 0;
- if (options.speed == 10)
- dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- else if (options.speed == 100)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
- if (options.duplex == 1)
- dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
- else if (options.duplex == 2)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
- phydev->supported &= ~dropmask;
- phydev->advertising = phydev->supported;
+ if (options.speed == 10) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.speed == 100) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ }
+ if (options.duplex == 1) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.duplex == 2) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ }
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
lp->speed = 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a0cd1c41cf5f..58bbba8582b0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -70,6 +70,7 @@ struct geneve_dev {
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
+ enum ifla_geneve_df df;
};
struct geneve_sock {
@@ -387,6 +388,59 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */
+static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct genevehdr *geneveh;
+ struct geneve_sock *gs;
+ u8 zero_vni[3] = { 0 };
+ u8 *vni = zero_vni;
+
+ if (skb->len < GENEVE_BASE_HLEN)
+ return -EINVAL;
+
+ geneveh = geneve_hdr(skb);
+ if (geneveh->ver != GENEVE_VER)
+ return -EINVAL;
+
+ if (geneveh->proto_type != htons(ETH_P_TEB))
+ return -EINVAL;
+
+ gs = rcu_dereference_sk_user_data(sk);
+ if (!gs)
+ return -ENOENT;
+
+ if (geneve_get_sk_family(gs) == AF_INET) {
+ struct iphdr *iph = ip_hdr(skb);
+ __be32 addr4 = 0;
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr4 = iph->daddr;
+ }
+
+ return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (geneve_get_sk_family(gs) == AF_INET6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct in6_addr addr6;
+
+ memset(&addr6, 0, sizeof(struct in6_addr));
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr6 = ip6h->daddr;
+ }
+
+ return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT;
+ }
+#endif
+
+ return -EPFNOSUPPORT;
+}
+
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
__be16 port, bool ipv6_rx_csum)
{
@@ -544,6 +598,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
tunnel_cfg.gro_receive = geneve_gro_receive;
tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+ tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
list_add(&gs->list, &gn->sock_list);
@@ -823,8 +878,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
struct flowi4 fl4;
__u8 tos, ttl;
+ __be16 df = 0;
__be16 sport;
- __be16 df;
int err;
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
@@ -838,6 +893,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
+
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
if (geneve->ttl_inherit)
@@ -845,8 +902,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
else
ttl = key->ttl;
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ if (geneve->df == GENEVE_DF_SET) {
+ df = htons(IP_DF);
+ } else if (geneve->df == GENEVE_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+ df = htons(IP_DF);
+ } else if (ntohs(eth->h_proto) == ETH_P_IP) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->frag_off & htons(IP_DF))
+ df = htons(IP_DF);
+ }
+ }
}
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
if (unlikely(err))
@@ -1093,6 +1164,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
+ [IFLA_GENEVE_DF] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1128,6 +1200,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_DF]) {
+ enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
+ if (df < 0 || df > GENEVE_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1173,7 +1255,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack,
const struct ip_tunnel_info *info,
bool metadata, bool ipv6_rx_csum,
- bool ttl_inherit)
+ bool ttl_inherit, enum ifla_geneve_df df)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1223,6 +1305,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = ipv6_rx_csum;
geneve->ttl_inherit = ttl_inherit;
+ geneve->df = df;
err = register_netdevice(dev);
if (err)
@@ -1242,7 +1325,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack,
struct ip_tunnel_info *info, bool *metadata,
bool *use_udp6_rx_checksums, bool *ttl_inherit,
- bool changelink)
+ enum ifla_geneve_df *df, bool changelink)
{
int attrtype;
@@ -1330,6 +1413,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ if (data[IFLA_GENEVE_DF])
+ *df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
if (data[IFLA_GENEVE_LABEL]) {
info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
IPV6_FLOWLABEL_MASK;
@@ -1448,6 +1534,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ enum ifla_geneve_df df = GENEVE_DF_UNSET;
bool use_udp6_rx_checksums = false;
struct ip_tunnel_info info;
bool ttl_inherit = false;
@@ -1456,12 +1543,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
init_tnl_info(&info, GENEVE_UDP_PORT);
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, false);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, false);
if (err)
return err;
err = geneve_configure(net, dev, extack, &info, metadata,
- use_udp6_rx_checksums, ttl_inherit);
+ use_udp6_rx_checksums, ttl_inherit, df);
if (err)
return err;
@@ -1524,6 +1611,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
+ enum ifla_geneve_df df;
bool ttl_inherit;
int err;
@@ -1539,7 +1627,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
ttl_inherit = geneve->ttl_inherit;
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, true);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, true);
if (err)
return err;
@@ -1572,6 +1660,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */
nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
@@ -1620,6 +1709,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
+ goto nla_put_failure;
+
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
@@ -1666,12 +1758,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
memset(tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &geneve_link_ops, tb);
+ &geneve_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
init_tnl_info(&info, dst_port);
- err = geneve_configure(net, dev, NULL, &info, true, true, false);
+ err = geneve_configure(net, dev, NULL, &info,
+ true, true, false, GENEVE_DF_UNSET);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
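
The geneve changes above add an IFLA_GENEVE_DF attribute with three policies (unset, set, inherit) and use it to pick the DF bit of the outer IPv4 header in geneve_xmit_skb(). The decision logic, pulled out into a stand-alone sketch for readability; the helper is hypothetical and not part of the patch.

/* Sketch: map the new geneve->df policy to the outer IPv4 DF bit. */
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/ip.h>

static __be16 example_geneve_outer_df(enum ifla_geneve_df policy,
				      const struct sk_buff *skb)
{
	if (policy == GENEVE_DF_SET)
		return htons(IP_DF);

	if (policy == GENEVE_DF_INHERIT) {
		struct ethhdr *eth = eth_hdr(skb);

		/* IPv6 payloads are never fragmented in flight, so treat
		 * them as DF; for IPv4, copy the inner header's DF bit.
		 */
		if (eth->h_proto == htons(ETH_P_IPV6))
			return htons(IP_DF);
		if (eth->h_proto == htons(ETH_P_IP) &&
		    (ip_hdr(skb)->frag_off & htons(IP_DF)))
			return htons(IP_DF);
	}

	/* GENEVE_DF_UNSET (and anything unrecognised): leave DF clear */
	return 0;
}
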
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf36e7ff3191..85936ed9e952 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -605,9 +605,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
IEEE_8021Q_INFO);
vlan->value = 0;
- vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
- vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ vlan->vlanid = skb_vlan_tag_get_id(skb);
+ vlan->cfi = skb_vlan_tag_get_cfi(skb);
+ vlan->pri = skb_vlan_tag_get_prio(skb);
}
if (skb_is_gso(skb)) {
@@ -781,7 +781,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
}
if (vlan) {
- u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+ u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
+ (vlan->cfi ? VLAN_CFI_MASK : 0);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
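
The netvsc hunks above replace open-coded mask/shift arithmetic with the skb_vlan_tag_get_id/prio/cfi() accessors on transmit and rebuild the full 16-bit TCI, including the CFI/DEI bit, on receive. A small round-trip sketch, assuming <linux/if_vlan.h>; the struct and function names are illustrative only.

/* Sketch: split a VLAN tag into its fields and rebuild the TCI. */
#include <linux/if_vlan.h>

struct example_vlan_fields {
	u16 vlanid;
	u8 pri;
	u8 cfi;
};

static void example_tx_split(const struct sk_buff *skb,
			     struct example_vlan_fields *f)
{
	f->vlanid = skb_vlan_tag_get_id(skb);	/* bits 0..11  */
	f->cfi = skb_vlan_tag_get_cfi(skb);	/* bit 12      */
	f->pri = skb_vlan_tag_get_prio(skb);	/* bits 13..15 */
}

static u16 example_rx_rebuild(const struct example_vlan_fields *f)
{
	return f->vlanid | (f->pri << VLAN_PRIO_SHIFT) |
	       (f->cfi ? VLAN_CFI_MASK : 0);
}
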
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index cb3518474f0e..a1b29173ca1c 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -91,11 +91,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
-static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
- .insn_hook = nsim_bpf_verify_insn,
- .finalize = nsim_bpf_finalize,
-};
-
static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
return ns->xdp_hw.prog;
@@ -263,6 +258,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
return 0;
}
+static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
+{
+ struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);
+
+ if (!ns->bpf_bind_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_create_prog(ns, prog);
+}
+
+static int nsim_bpf_translate(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
+
+ state->state = "xlated";
+ return 0;
+}
+
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
@@ -275,6 +288,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
kfree(state);
}
+static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
+ .insn_hook = nsim_bpf_verify_insn,
+ .finalize = nsim_bpf_finalize,
+ .prepare = nsim_bpf_verifier_prep,
+ .translate = nsim_bpf_translate,
+ .destroy = nsim_bpf_destroy_prog,
+};
+
static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
if (bpf->prog && bpf->prog->aux->offload) {
@@ -533,30 +554,11 @@ static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bpf_bound_prog *state;
int err;
ASSERT_RTNL();
switch (bpf->command) {
- case BPF_OFFLOAD_VERIFIER_PREP:
- if (!ns->bpf_bind_accept)
- return -EOPNOTSUPP;
-
- err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
- if (err)
- return err;
-
- bpf->verifier.ops = &nsim_bpf_analyzer_ops;
- return 0;
- case BPF_OFFLOAD_TRANSLATE:
- state = bpf->offload.prog->aux->offload->dev_priv;
-
- state->state = "xlated";
- return 0;
- case BPF_OFFLOAD_DESTROY:
- nsim_bpf_destroy_prog(bpf->offload.prog);
- return 0;
case XDP_QUERY_PROG:
return xdp_attachment_query(&ns->xdp, bpf);
case XDP_QUERY_PROG_HW:
@@ -599,7 +601,7 @@ int nsim_bpf_init(struct netdevsim *ns)
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM;
- ns->sdev->bpf_dev = bpf_offload_dev_create();
+ ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
if (err)
return err;
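
The netdevsim change above retires the BPF_OFFLOAD_VERIFIER_PREP/TRANSLATE/DESTROY ndo_bpf commands: the callbacks now live in struct bpf_prog_offload_ops, which is handed to bpf_offload_dev_create(). A hedged sketch of the registration pattern for an imaginary driver follows; all exdrv_* names are hypothetical, and the bpf_offload_dev_create() signature is the one used by this patch.

/* Sketch: per-program offload callbacks wired through
 * struct bpf_prog_offload_ops instead of ndo_bpf switch cases.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/err.h>

static int exdrv_bpf_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	return 0;		/* accept every instruction */
}

static int exdrv_bpf_finalize(struct bpf_verifier_env *env)
{
	return 0;		/* nothing to do after verification */
}

static int exdrv_bpf_verifier_prep(struct bpf_prog *prog)
{
	return 0;		/* was handled via BPF_OFFLOAD_VERIFIER_PREP */
}

static int exdrv_bpf_translate(struct bpf_prog *prog)
{
	return 0;		/* was handled via BPF_OFFLOAD_TRANSLATE */
}

static void exdrv_bpf_destroy_prog(struct bpf_prog *prog)
{
	/* was handled via BPF_OFFLOAD_DESTROY */
}

static const struct bpf_prog_offload_ops exdrv_bpf_dev_ops = {
	.insn_hook = exdrv_bpf_verify_insn,
	.finalize  = exdrv_bpf_finalize,
	.prepare   = exdrv_bpf_verifier_prep,
	.translate = exdrv_bpf_translate,
	.destroy   = exdrv_bpf_destroy_prog,
};

static struct bpf_offload_dev *exdrv_offdev;

static int exdrv_bpf_init(void)
{
	exdrv_offdev = bpf_offload_dev_create(&exdrv_bpf_dev_ops);
	return PTR_ERR_OR_ZERO(exdrv_offdev);
}
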
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 6fe5dc9201d0..9d0504f3e3b2 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { {
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = am79c_config_init,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 632472cab3bb..beb3309bb0f0 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -25,15 +25,10 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQR405 0x03a1b4b0
-#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
- SUPPORTED_1000baseT_Full | \
- SUPPORTED_100baseT_Full | \
- PHY_DEFAULT_FEATURES)
-
static int aquantia_config_aneg(struct phy_device *phydev)
{
- phydev->supported = PHY_AQUANTIA_FEATURES;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->supported, phy_10gbit_features);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
@@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ1202",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ2104",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR105",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR106",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -164,7 +155,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR107",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR405",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e74a047a846e..f9432d053a22 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -395,7 +394,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index d95bffdec4c1..a88dd14a25c0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
int reg, err;
/* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
reg = phy_read(phydev, MII_BCM63XX_IR);
if (reg < 0)
@@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b2b6307d64a4..712224cc442d 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
@@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
{ PHY_ID_BCM7268, 0xfffffff0, },
{ PHY_ID_BCM7271, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f7ebdcff53e4..1b350183bffb 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
static int bcm87xx_config_init(struct phy_device *phydev)
{
- phydev->supported = SUPPORTED_10000baseR_FEC;
- phydev->advertising = ADVERTISED_10000baseR_FEC;
+ linkmode_zero(phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->supported);
+ linkmode_zero(phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->advertising);
phydev->state = PHY_NOLINK;
phydev->autoneg = AUTONEG_DISABLE;
@@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -205,7 +208,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 704537010453..aa73c5cc5f86 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -666,7 +659,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
@@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index c05af00bf4b6..fea61c81bda9 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
@@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5ee99b3b428c..97162008f42b 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,7 +150,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161B/C",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index edd4d44a386d..18b41bc345ab 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6e8a2a4f3a6e..24c7f149f3e6 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83822",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83822_config_init,
.soft_reset = dp83822_phy_reset,
.get_wol = dp83822_get_wol,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6e8e42361fd5..a6b55909d1dc 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id_mask = 0xfffffff0, \
.name = _name, \
.features = PHY_BASIC_FEATURES, \
- .flags = PHY_HAS_INTERRUPT, \
\
.soft_reset = genphy_soft_reset, \
.config_init = _config_init, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b3935778b19f..da6a67d47ce9 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83867",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 78cad134a79e..da13356999e5 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83TC811",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 67b260877f30..f7fb62712cd8 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -223,14 +223,23 @@ struct phy_device *fixed_phy_register(unsigned int irq,
switch (status->speed) {
case SPEED_1000:
- phy->supported = PHY_1000BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_100:
- phy->supported = PHY_100BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_10:
default:
- phy->supported = PHY_10BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phy->supported);
}
ret = phy_device_register(phy);
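
The fixed_phy hunk above switches from overwriting the whole supported mask to accumulating bits through deliberate fall-through, so a SPEED_1000 fixed link also reports the 100 and 10 Mb/s modes. A tiny sketch of what that cumulative mask implies (hypothetical helper, purely illustrative):

static bool example_fixed_supports_100(struct phy_device *phy)
{
	/* True for fixed links registered at 100 or 1000 Mb/s, false for
	 * 10 Mb/s ones, because the switch above accumulates modes from
	 * the fastest case downwards.
	 */
	return linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 phy->supported);
}
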
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 791587a49215..7d5938b87660 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -25,6 +25,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -36,14 +37,34 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
-#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
-#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
+#define IP1001_RXPHASE_SEL BIT(0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL BIT(1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
-#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
+#define IP101A_G_APS_ON BIT(1) /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
-#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
-#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
+#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */
+#define IP101A_G_IRQ_ALL_MASK BIT(11) /* IRQ's inactive */
+#define IP101A_G_IRQ_SPEED_CHANGE BIT(2)
+#define IP101A_G_IRQ_DUPLEX_CHANGE BIT(1)
+#define IP101A_G_IRQ_LINK_CHANGE BIT(0)
+
+#define IP101G_DIGITAL_IO_SPEC_CTRL 0x1d
+#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32 BIT(2)
+
+/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin
+ * (pin number 21). The hardware default is RXER (receive error) mode. But it
+ * can be configured to interrupt mode manually.
+ */
+enum ip101gr_sel_intr32 {
+ IP101GR_SEL_INTR32_KEEP,
+ IP101GR_SEL_INTR32_INTR,
+ IP101GR_SEL_INTR32_RXER,
+};
+
+struct ip101a_g_phy_priv {
+ enum ip101gr_sel_intr32 sel_intr32;
+};
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -162,18 +183,92 @@ static int ip1001_config_init(struct phy_device *phydev)
return 0;
}
+static int ip175c_read_status(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_read_status(phydev);
+ else
+ /* Don't need to read status for switch ports */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
+ return 0;
+}
+
+static int ip175c_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_config_aneg(phydev);
+
+ return 0;
+}
+
+static int ip101a_g_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct ip101a_g_phy_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Both functions (RX error and interrupt status) are sharing the same
+ * pin on the 32-pin IP101GR, so this is an exclusive choice.
+ */
+ if (device_property_read_bool(dev, "icplus,select-rx-error") &&
+ device_property_read_bool(dev, "icplus,select-interrupt")) {
+ dev_err(dev,
+ "RXER and INTR mode cannot be selected together\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(dev, "icplus,select-rx-error"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_RXER;
+ else if (device_property_read_bool(dev, "icplus,select-interrupt"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_INTR;
+ else
+ priv->sel_intr32 = IP101GR_SEL_INTR32_KEEP;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int ip101a_g_config_init(struct phy_device *phydev)
{
- int c;
+ struct ip101a_g_phy_priv *priv = phydev->priv;
+ int err, c;
c = ip1xx_reset(phydev);
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ /* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
+ switch (priv->sel_intr32) {
+ case IP101GR_SEL_INTR32_RXER:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
+ if (err < 0)
+ return err;
+ break;
+
+ case IP101GR_SEL_INTR32_INTR:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
+ if (err < 0)
+ return err;
+ break;
+
+ default:
+ /* Don't touch IP101G_DIGITAL_IO_SPEC_CTRL because it's not
+ * documented on IP101A and it's not clear whether this would
+ * cause problems.
+ * For the 32-pin IP101GR we simply keep the SEL_INTR32
+ * configuration as set by the bootloader when not configured
+ * to one of the special functions.
+ */
+ break;
+ }
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
@@ -182,23 +277,29 @@ static int ip101a_g_config_init(struct phy_device *phydev)
return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
}
-static int ip175c_read_status(struct phy_device *phydev)
+static int ip101a_g_config_intr(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_read_status(phydev);
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* INTR pin used: Speed/link/duplex will cause an interrupt */
+ val = IP101A_G_IRQ_PIN_USED;
else
- /* Don't need to read status for switch ports */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ val = IP101A_G_IRQ_ALL_MASK;
- return 0;
+ return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
}
-static int ip175c_config_aneg(struct phy_device *phydev)
+static int ip101a_g_did_interrupt(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_config_aneg(phydev);
+ int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
- return 0;
+ if (val < 0)
+ return 0;
+
+ return val & (IP101A_G_IRQ_SPEED_CHANGE |
+ IP101A_G_IRQ_DUPLEX_CHANGE |
+ IP101A_G_IRQ_LINK_CHANGE);
}
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
@@ -234,7 +335,9 @@ static struct phy_driver icplus_driver[] = {
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
+ .probe = ip101a_g_probe,
+ .config_intr = ip101a_g_config_intr,
+ .did_interrupt = ip101a_g_did_interrupt,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 7d936fb61c22..fc0f5024a29e 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c14b254b2879..c8bb29ae1a2a 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
*/
} while (lpa == adv && retry--);
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
lpa &= adv;
@@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
}
return 0;
@@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
@@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cbec296107bd..6a9881942e53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
}
/**
- * ethtool_adv_to_fiber_adv_t
- * @ethadv: the ethtool advertisement settings
+ * linkmode_adv_to_fiber_adv_t
+ * @advertise: the linkmode advertisement settings
*
- * A small helper function that translates ethtool advertisement
- * settings to phy autonegotiation advertisements for the
- * MII_ADV register for fiber link.
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the MII_ADV
+ * register for fiber link.
*/
-static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv)
+static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
{
u32 result = 0;
- if (ethadv & ADVERTISED_1000baseT_Half)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
result |= ADVERTISE_FIBER_1000HALF;
- if (ethadv & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
result |= ADVERTISE_FIBER_1000FULL;
- if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP))
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= LPA_PAUSE_ASYM_FIBER;
- else if (ethadv & ADVERTISE_PAUSE_CAP)
+ else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= (ADVERTISE_PAUSE_FIBER
& (~ADVERTISE_PAUSE_ASYM_FIBER));
@@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
int changed = 0;
int err;
int adv, oldadv;
- u32 advertise;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
/* Setup fiber advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
| LPA_PAUSE_FIBER);
- adv |= ethtool_adv_to_fiber_adv_t(advertise);
+ adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- u32 pause;
/* Select page 18 */
err = marvell_set_page(phydev, 18);
@@ -878,9 +877,14 @@ static int m88e1510_config_init(struct phy_device *phydev)
 * This means we can never be truly sure what was advertised,
* so disable Pause support.
*/
- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported &= ~pause;
- phydev->advertising &= ~pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
}
return m88e1318_config_init(phydev);
@@ -1043,22 +1047,21 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
/**
- * fiber_lpa_to_ethtool_lpa_t
+ * fiber_lpa_to_linkmode_lpa_t
+ * @advertising: the linkmode advertisement settings
* @lpa: value of the MII_LPA register for fiber link
*
* A small helper function that translates MII_LPA
- * bits to ethtool LP advertisement settings.
+ * bits to linkmode LP advertisement settings.
*/
-static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa)
+static void fiber_lpa_to_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
- u32 result = 0;
-
if (lpa & LPA_FIBER_1000HALF)
- result |= ADVERTISED_1000baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising);
if (lpa & LPA_FIBER_1000FULL)
- result |= ADVERTISED_1000baseT_Full;
-
- return result;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising);
}
/**
@@ -1134,9 +1137,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
if (!fiber) {
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, lpagb);
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
@@ -1144,7 +1146,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
} else {
/* The fiber link is only 1000M capable */
- phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+ fiber_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
if (phydev->duplex == DUPLEX_FULL) {
if (!(lpa & LPA_PAUSE_FIBER)) {
@@ -1183,7 +1185,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
return 0;
}
@@ -1235,7 +1237,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE &&
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported) &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
@@ -1278,7 +1281,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1312,7 +1316,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1463,7 +1468,8 @@ error:
static int marvell_get_sset_count(struct phy_device *phydev)
{
- if (phydev->supported & SUPPORTED_FIBRE)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported))
return ARRAY_SIZE(marvell_hw_stats);
else
return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
@@ -2005,7 +2011,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2024,7 +2029,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2043,7 +2047,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2063,7 +2066,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1118",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2082,7 +2084,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
@@ -2103,7 +2104,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
@@ -2126,7 +2126,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2146,7 +2145,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1149R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2165,7 +2163,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2184,7 +2181,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1116R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.ack_interrupt = &marvell_ack_interrupt,
@@ -2202,7 +2198,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
.features = PHY_GBIT_FIBRE_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2226,7 +2221,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2248,7 +2242,6 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2268,7 +2261,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -2289,7 +2281,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
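
Much of the marvell.c churn above follows the same three-step autoneg pattern: clamp phydev->advertising to phydev->supported with linkmode_and(), translate the linkmode mask to MII register bits, and only rewrite the register when the value changed. A condensed copper-side sketch of that pattern; the function name is hypothetical, and linkmode_adv_to_mii_adv_t() is the generic helper also used by the marvell10g hunk below.

/* Sketch: clamp, translate, and conditionally write MII_ADVERTISE. */
#include <linux/mii.h>
#include <linux/phy.h>

static int example_config_aneg_copper(struct phy_device *phydev)
{
	int adv, oldadv;

	/* Never advertise more than the PHY supports */
	linkmode_and(phydev->advertising, phydev->advertising,
		     phydev->supported);

	adv = phy_read(phydev, MII_ADVERTISE);
	if (adv < 0)
		return adv;

	oldadv = adv;
	adv &= ~(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	adv |= linkmode_adv_to_mii_adv_t(phydev->advertising);

	if (adv == oldadv)
		return 0;

	return phy_write(phydev, MII_ADVERTISE, adv);
}
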
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1c9d039eec63..6f6e886fc836 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
- u32 mask;
int val;
/* Check that the PHY interface type is compatible */
@@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev)
}
}
- if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
- phydev_warn(phydev,
- "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
-
- phydev->supported &= mask;
- phydev->advertising &= phydev->supported;
+ linkmode_copy(phydev->supported, supported);
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
return 0;
}
@@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev)
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
- u32 advertising;
+ u16 reg;
int ret;
/* We don't support manual MDI control */
@@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev)
return genphy_c45_an_disable_aneg(phydev);
}
- phydev->advertising &= phydev->supported;
- advertising = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_100BASE4 |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- ethtool_adv_to_mii_adv_t(advertising));
+ linkmode_adv_to_mii_adv_t(phydev->advertising));
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
+ reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF,
- ethtool_adv_to_mii_ctrl1000_t(advertising));
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
/* 10G control register */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->advertising))
+ reg = MDIO_AN_10GBT_CTRL_ADV10G;
+ else
+ reg = 0;
+
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G,
- advertising & ADVERTISED_10000baseT_Full ?
- MDIO_AN_10GBT_CTRL_ADV10G : 0);
+ MDIO_AN_10GBT_CTRL_ADV10G, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->link = 0;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, val);
if (phydev->autoneg == AUTONEG_ENABLE)
phy_resolve_aneg_linkmode(phydev);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index ddc2c5ea3787..b03bcf2c388a 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = {
.phy_id_mask = 0xfffffff0,
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
+ .flags = PHY_IS_INTERNAL,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9265dea79412..c33384710d26 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev)
static int ksz8041_config_init(struct phy_device *phydev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
struct device_node *of_node = phydev->mdio.dev.of_node;
/* Limit supported and advertised modes in fiber mode */
if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
phydev->dev_flags |= MICREL_PHY_FXEN;
- phydev->supported &= SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half;
- phydev->supported |= SUPPORTED_FIBRE;
- phydev->advertising &= ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half;
- phydev->advertising |= ADVERTISED_FIBRE;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_and(phydev->advertising, phydev->advertising, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
phydev->autoneg = AUTONEG_DISABLE;
}
@@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
@@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
@@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1124,7 +1116,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
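
The `.flags = PHY_HAS_INTERRUPT` removals across these drivers rely on the core inferring interrupt capability from the presence of the interrupt callbacks (see the phy_drv_supports_irq() helper added to phy_device.c later in this patch). A minimal sketch of a driver entry after this change — the PHY ID, register offset and names are illustrative placeholders, not an existing driver:

```c
#include <linux/phy.h>
#include <linux/module.h>

/* Read/clear the PHY's interrupt status register (offset is illustrative). */
static int example_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, 0x1b);

	return (err < 0) ? err : 0;
}

/* Enable or disable interrupt generation based on phydev->interrupts. */
static int example_config_intr(struct phy_device *phydev)
{
	return 0;
}

static struct phy_driver example_driver[] = { {
	.phy_id		= 0x00112233,		/* illustrative ID */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example 10/100 PHY",
	.features	= PHY_BASIC_FEATURES,
	/* no .flags = PHY_HAS_INTERRUPT any more; the two callbacks below
	 * are what tells the core this driver can use interrupts. */
	.ack_interrupt	= example_ack_interrupt,
	.config_intr	= example_config_intr,
} };

module_phy_driver(example_driver);
```
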
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 04b12e34da58..7557bebd5d7f 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = {
.name = "Microchip LAN88xx",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c600a8509d60..3d09b471632c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.name = "Microchip LAN87xx T1",
.features = PHY_BASIC_T1_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7cae17517744..cfe680f78a3f 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1829,7 +1829,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1855,7 +1854,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1881,7 +1879,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1907,7 +1904,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1933,7 +1929,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1960,7 +1955,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2b1e336961f9..139bed2c8ab4 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index d7636ff03bc7..03af927fa5ad 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
return val;
if (val & MDIO_AN_10GBT_STAT_LP10G)
- phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->lp_advertising);
return 0;
}
@@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
+ linkmode_zero(phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
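
The pattern above — declare a link-mode mask, set individual ETHTOOL_LINK_MODE_*_BIT entries, then copy or AND it into phydev->supported/advertising — replaces the old u32 SUPPORTED_*/ADVERTISED_* arithmetic throughout this series. A condensed sketch of the idiom, assuming a valid phydev obtained elsewhere:

```c
#include <linux/linkmode.h>
#include <linux/phy.h>

/* Build a mask of modes, then restrict the PHY's advertisement to it. */
static void restrict_to_10g(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);

	linkmode_and(phydev->advertising, phydev->advertising, mask);
}
```
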
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index c7da4cbb1103..20fbd5eb56fd 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed. */
static const struct phy_setting settings[] = {
+ /* 100G */
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ },
+ /* 56G */
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ },
+ /* 50G */
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ },
+ /* 40G */
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ },
+ /* 25G */
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ },
+
+ /* 20G */
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ },
+ /* 10G */
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ },
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
@@ -75,22 +193,51 @@ static const struct phy_setting settings[] = {
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
},
+ /* 5G */
+ {
+ .speed = SPEED_5000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ },
+
+ /* 2.5G */
{
.speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
},
{
- .speed = SPEED_1000,
+ .speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
},
+ /* 1G */
{
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
},
{
.speed = SPEED_1000,
@@ -103,6 +250,12 @@ static const struct phy_setting settings[] = {
.bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
},
{
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ },
+ /* 100M */
+ {
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -112,6 +265,7 @@ static const struct phy_setting settings[] = {
.duplex = DUPLEX_HALF,
.bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
},
+ /* 10M */
{
.speed = SPEED_10,
.duplex = DUPLEX_FULL,
@@ -129,7 +283,6 @@ static const struct phy_setting settings[] = {
* @speed: speed to match
* @duplex: duplex to match
* @mask: allowed link modes
- * @maxbit: bit size of link modes
* @exact: an exact match is required
*
* Search the settings array for a setting that matches the speed and
@@ -143,14 +296,14 @@ static const struct phy_setting settings[] = {
* they all fail, %NULL will be returned.
*/
const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact)
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
{
const struct phy_setting *p, *match = NULL, *last = NULL;
int i;
for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < maxbit && test_bit(p->bit, mask)) {
+ if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
+ test_bit(p->bit, mask)) {
last = p;
if (p->speed == speed && p->duplex == duplex) {
/* Exact match for speed and duplex */
@@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
EXPORT_SYMBOL_GPL(phy_lookup_setting);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit)
+ unsigned long *mask)
{
size_t count;
int i;
for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < maxbit &&
+ if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
test_bit(settings[i].bit, mask) &&
(count == 0 || speeds[count - 1] != settings[i].speed))
speeds[count++] = settings[i].speed;
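
With the maxbit parameter gone, callers hand phy_lookup_setting() and phy_speeds() the full link-mode bitmap and the helpers bound-check against __ETHTOOL_LINK_MODE_MASK_NBITS internally. A hedged sketch of a caller after this change (the reporting function itself is hypothetical):

```c
#include <linux/phy.h>

/* Find the nearest supported setting at or below 1G/full and report it. */
static void report_best_setting(struct phy_device *phydev)
{
	const struct phy_setting *s;

	s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL,
			       phydev->supported, false);
	if (s)
		phydev_info(phydev, "best match: %u Mbps (duplex %d)\n",
			    s->speed, s->duplex);
}
```
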
@@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
*/
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
- u32 common = phydev->lp_advertising & phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- if (common & ADVERTISED_10000baseT_Full) {
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) {
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_100baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_100baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_10baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_10baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
}
if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
- phydev->asym_pause = !!(phydev->lp_advertising &
- ADVERTISED_Asym_Pause);
+ phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising);
+ phydev->asym_pause = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising);
}
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
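
phy_resolve_aneg_linkmode() now intersects the two bitmaps and walks them from the fastest mode down. A condensed, hypothetical read_status() showing how a driver is expected to use it after filling lp_advertising; link detection and the gigabit registers are omitted for brevity:

```c
#include <linux/phy.h>
#include <linux/mii.h>

static int example_read_status(struct phy_device *phydev)
{
	int lpa = phy_read(phydev, MII_LPA);

	if (lpa < 0)
		return lpa;

	/* Fill the link-partner bitmap from the base-page LPA register. */
	mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
	phydev->link = 1;	/* assume link detection done elsewhere */

	/* Let the core pick speed/duplex/pause from the common modes. */
	if (phydev->autoneg == AUTONEG_ENABLE)
		phy_resolve_aneg_linkmode(phydev);

	return 0;
}
```
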
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d73ac3309ce..376a0d8a2b61 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st)
{
switch (st) {
PHY_STATE_STR(DOWN)
- PHY_STATE_STR(STARTING)
PHY_STATE_STR(READY)
- PHY_STATE_STR(PENDING)
PHY_STATE_STR(UP)
- PHY_STATE_STR(AN)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
@@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st)
return NULL;
}
+static void phy_link_up(struct phy_device *phydev)
+{
+ phydev->phy_link_change(phydev, true, true);
+ phy_led_trigger_change_speed(phydev);
+}
+
+static void phy_link_down(struct phy_device *phydev, bool do_carrier)
+{
+ phydev->phy_link_change(phydev, false, do_carrier);
+ phy_led_trigger_change_speed(phydev);
+}
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success or < 0 on error.
*/
-static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, bool interrupts)
{
- phydev->interrupts = interrupts;
+ phydev->interrupts = interrupts ? 1 : 0;
if (phydev->drv->config_intr)
return phydev->drv->config_intr(phydev);
@@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done);
* settings were found.
*/
static const struct phy_setting *
-phy_find_valid(int speed, int duplex, u32 supported)
+phy_find_valid(int speed, int duplex, unsigned long *supported)
{
- unsigned long mask = supported;
-
- return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
+ return phy_lookup_setting(speed, duplex, supported, false);
}
/**
@@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- unsigned long supported = phy->supported;
-
- return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
+ return phy_speeds(speeds, size, phy->supported);
}
/**
@@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*
* Description: Returns true if there is a valid setting, false otherwise.
*/
-static inline bool phy_check_valid(int speed, int duplex, u32 features)
+static inline bool phy_check_valid(int speed, int duplex,
+ unsigned long *features)
{
- unsigned long mask = features;
-
- return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
+ return !!phy_lookup_setting(speed, duplex, features, true);
}
/**
@@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features)
static void phy_sanitize_settings(struct phy_device *phydev)
{
const struct phy_setting *setting;
- u32 features = phydev->supported;
/* Sanitize settings based on PHY capabilities */
- if ((features & SUPPORTED_Autoneg) == 0)
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
phydev->autoneg = AUTONEG_DISABLE;
- setting = phy_find_valid(phydev->speed, phydev->duplex, features);
+ setting = phy_find_valid(phydev->speed, phydev->duplex,
+ phydev->supported);
if (setting) {
phydev->speed = setting->speed;
phydev->duplex = setting->duplex;
@@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev)
*/
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u32 speed = ethtool_cmd_speed(cmd);
if (cmd->phy_address != phydev->mdio.addr)
return -EINVAL;
/* We make sure that we don't pass unsupported values in to the PHY */
- cmd->advertising &= phydev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
@@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->speed = speed;
- phydev->advertising = cmd->advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (AUTONEG_ENABLE == cmd->autoneg)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = cmd->duplex;
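
The legacy ethtool_cmd path keeps exchanging u32 ADVERTISED_* masks with user space, so conversion now happens at the boundary, as above. A small sketch of both conversion helpers; the example value is arbitrary:

```c
#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/printk.h>

static void convert_example(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
	u32 legacy = ADVERTISED_100baseT_Full | ADVERTISED_Autoneg;
	u32 back;

	/* u32 -> bitmap: always succeeds, clears the upper bits. */
	ethtool_convert_legacy_u32_to_link_mode(modes, legacy);

	/* bitmap -> u32: returns false if modes above bit 31 would be lost. */
	if (!ethtool_convert_link_mode_to_legacy_u32(&back, modes))
		pr_warn("some link modes were lost in the legacy conversion\n");
}
```
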
@@ -304,25 +311,24 @@ EXPORT_SYMBOL(phy_ethtool_sset);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 autoneg = cmd->base.autoneg;
u8 duplex = cmd->base.duplex;
u32 speed = cmd->base.speed;
- u32 advertising;
if (cmd->base.phy_address != phydev->mdio.addr)
return -EINVAL;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
+ linkmode_copy(advertising, cmd->link_modes.advertising);
/* We make sure that we don't pass unsupported values in to the PHY */
- advertising &= phydev->supported;
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && advertising == 0)
+ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
@@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->speed = speed;
- phydev->advertising = advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = duplex;
@@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- phydev->supported);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- phydev->advertising);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- phydev->lp_advertising);
+ linkmode_copy(cmd->link_modes.supported, phydev->supported);
+ linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+ linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
cmd->base.speed = phydev->speed;
cmd->base.duplex = phydev->duplex;
@@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
break;
case MII_ADVERTISE:
- phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+ mii_adv_to_linkmode_adv_t(phydev->advertising,
+ val);
change_autoneg = true;
break;
default:
@@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned int secs)
+{
+ mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+ secs * HZ);
+}
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+ phy_queue_state_machine(phydev, 0);
+}
+
static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
@@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
+ * phy_check_link_status - check link status and set state accordingly
+ * @phydev: the phy_device struct
+ *
+ * Description: Check for link and whether autoneg was triggered / is running
+ * and set state accordingly
+ */
+static int phy_check_link_status(struct phy_device *phydev)
+{
+ int err;
+
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ err = phy_read_status(phydev);
+ if (err)
+ return err;
+
+ if (phydev->link && phydev->state != PHY_RUNNING) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else if (!phydev->link && phydev->state != PHY_NOLINK) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
+ }
+
+ return 0;
+}
+
+/**
* phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev)
*/
int phy_start_aneg(struct phy_device *phydev)
{
- bool trigger = 0;
int err;
if (!phydev->drv)
@@ -504,7 +547,7 @@ int phy_start_aneg(struct phy_device *phydev)
phy_sanitize_settings(phydev);
/* Invalidate LP advertising flags */
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
err = phy_config_aneg(phydev);
if (err < 0)
@@ -512,32 +555,16 @@ int phy_start_aneg(struct phy_device *phydev)
if (phydev->state != PHY_HALTED) {
if (AUTONEG_ENABLE == phydev->autoneg) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
+ err = phy_check_link_status(phydev);
} else {
phydev->state = PHY_FORCING;
phydev->link_timeout = PHY_FORCE_TIMEOUT;
}
}
- /* Re-schedule a PHY state machine to check PHY status because
- * negotiation may already be done and aneg interrupt may not be
- * generated.
- */
- if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
- err = phy_aneg_done(phydev);
- if (err > 0) {
- trigger = true;
- err = 0;
- }
- }
-
out_unlock:
mutex_unlock(&phydev->lock);
- if (trigger)
- phy_trigger_machine(phydev);
-
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
@@ -573,20 +600,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
*/
int phy_speed_down(struct phy_device *phydev, bool sync)
{
- u32 adv = phydev->lp_advertising & phydev->supported;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- if (adv & PHY_10BT_FEATURES)
- phydev->advertising &= ~(PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
- else if (adv & PHY_100BT_FEATURES)
- phydev->advertising &= ~PHY_1000BT_FEATURES;
+ linkmode_copy(adv_old, phydev->advertising);
+ linkmode_copy(adv, phydev->lp_advertising);
+ linkmode_and(adv, adv, phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ }
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
ret = phy_config_aneg(phydev);
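
phy_speed_down() here, and phy_speed_up() in the following hunk, are rewritten on top of link-mode masks but keep their calling convention. A sketch of the usual suspend/resume pairing from a MAC driver; the private structure is a stand-in:

```c
#include <linux/phy.h>

struct example_priv {
	struct phy_device *phydev;
};

/* Drop to the lowest speed the link partner still advertises (e.g. to fit a
 * Wake-on-LAN power budget); 'false' means don't wait for renegotiation.
 */
static int example_suspend(struct example_priv *priv)
{
	return phy_speed_down(priv->phydev, false);
}

/* Re-advertise all supported speeds on resume. */
static int example_resume(struct example_priv *priv)
{
	return phy_speed_up(priv->phydev);
}
```
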
@@ -605,28 +650,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
*/
int phy_speed_up(struct phy_device *phydev)
{
- u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
+
+ linkmode_copy(adv_old, phydev->advertising);
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
+
+ linkmode_andnot(not_speeds, adv_old, all_speeds);
+ linkmode_copy(supported, phydev->supported);
+ linkmode_and(speeds, supported, all_speeds);
+ linkmode_or(phydev->advertising, not_speeds, speeds);
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
return phy_config_aneg(phydev);
}
EXPORT_SYMBOL_GPL(phy_speed_up);
-static void phy_queue_state_machine(struct phy_device *phydev,
- unsigned int secs)
-{
- mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- secs * HZ);
-}
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -644,20 +697,6 @@ void phy_start_machine(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_start_machine);
/**
- * phy_trigger_machine - trigger the state machine to run
- *
- * @phydev: the phy_device struct
- *
- * Description: There has been a change in state which requires that the
- * state machine runs.
- */
-
-void phy_trigger_machine(struct phy_device *phydev)
-{
- phy_queue_state_machine(phydev, 0);
-}
-
-/**
* phy_stop_machine - stop the PHY state machine tracking
* @phydev: target phy_device struct
*
@@ -711,30 +750,26 @@ static int phy_disable_interrupts(struct phy_device *phydev)
}
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
+ * phy_interrupt - PHY interrupt handler
+ * @irq: interrupt line
+ * @phy_dat: phy_device pointer
+ *
+ * Description: Handle PHY interrupt
*/
-static irqreturn_t phy_change(struct phy_device *phydev)
+static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- return IRQ_NONE;
-
- if (phydev->state == PHY_HALTED)
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
+ struct phy_device *phydev = phy_dat;
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
+ if (PHY_HALTED == phydev->state)
+ return IRQ_NONE; /* It can't be ours. */
+
+ if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+ return IRQ_NONE;
/* reschedule state queue work to run as soon as possible */
phy_trigger_machine(phydev);
- if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ if (phy_clear_interrupt(phydev))
goto phy_err;
return IRQ_HANDLED;
@@ -744,36 +779,6 @@ phy_err:
}
/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
- * phy_interrupt - PHY interrupt handler
- * @irq: interrupt line
- * @phy_dat: phy_device pointer
- *
- * Description: When a PHY interrupt occurs, the handler disables
- * interrupts, and uses phy_change to handle the interrupt.
- */
-static irqreturn_t phy_interrupt(int irq, void *phy_dat)
-{
- struct phy_device *phydev = phy_dat;
-
- if (PHY_HALTED == phydev->state)
- return IRQ_NONE; /* It can't be ours. */
-
- return phy_change(phydev);
-}
-
-/**
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
@@ -851,7 +856,7 @@ out_unlock:
phy_state_machine(&phydev->state_queue.work);
/* Cannot call flush_scheduled_work() here as desired because
- * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+ * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
* will not reenable interrupts.
*/
}
@@ -874,9 +879,6 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
case PHY_READY:
phydev->state = PHY_UP;
break;
@@ -902,18 +904,6 @@ void phy_start(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start);
-static void phy_link_up(struct phy_device *phydev)
-{
- phydev->phy_link_change(phydev, true, true);
- phy_led_trigger_change_speed(phydev);
-}
-
-static void phy_link_down(struct phy_device *phydev, bool do_carrier)
-{
- phydev->phy_link_change(phydev, false, do_carrier);
- phy_led_trigger_change_speed(phydev);
-}
-
/**
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
@@ -936,63 +926,17 @@ void phy_state_machine(struct work_struct *work)
switch (phydev->state) {
case PHY_DOWN:
- case PHY_STARTING:
case PHY_READY:
- case PHY_PENDING:
break;
case PHY_UP:
needs_aneg = true;
- phydev->link_timeout = PHY_AN_TIMEOUT;
-
- break;
- case PHY_AN:
- err = phy_read_status(phydev);
- if (err < 0)
- break;
-
- /* If the link is down, give up on negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- break;
- }
-
- /* Check if negotiation is done. Break if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else if (0 == phydev->link_timeout--)
- needs_aneg = true;
break;
case PHY_NOLINK:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- }
+ case PHY_RUNNING:
+ case PHY_CHANGELINK:
+ case PHY_RESUMING:
+ err = phy_check_link_status(phydev);
break;
case PHY_FORCING:
err = genphy_update_link(phydev);
@@ -1008,32 +952,6 @@ void phy_state_machine(struct work_struct *work)
phy_link_down(phydev, false);
}
break;
- case PHY_RUNNING:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
@@ -1041,30 +959,6 @@ void phy_state_machine(struct work_struct *work)
do_suspend = true;
}
break;
- case PHY_RESUMING:
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0) {
- break;
- } else if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- break;
}
mutex_unlock(&phydev->lock);
@@ -1104,10 +998,34 @@ void phy_state_machine(struct work_struct *work)
void phy_mac_interrupt(struct phy_device *phydev)
{
/* Trigger a state machine change */
- queue_work(system_power_efficient_wq, &phydev->phy_queue);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_mac_interrupt);
+static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
+{
+ linkmode_zero(advertising);
+
+ if (eee_adv & MDIO_EEE_100TX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000T)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000KX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKX4)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKR)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ advertising);
+}
+
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1126,9 +1044,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* According to 802.3az,the EEE is supported only in full duplex-mode.
*/
if (phydev->duplex == DUPLEX_FULL) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int eee_lp, eee_cap, eee_adv;
- u32 lp, cap, adv;
int status;
+ u32 cap;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1155,9 +1076,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (eee_adv <= 0)
goto eee_exit_err;
- adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
- lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ mmd_eee_adv_to_linkmode(adv, eee_adv);
+ mmd_eee_adv_to_linkmode(lp, eee_lp);
+ linkmode_and(common, adv, lp);
+
+ if (!phy_check_valid(phydev->speed, phydev->duplex, common))
goto eee_exit_err;
if (clk_stop_enable) {
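
phy_init_eee() now builds the common EEE advertisement with linkmode_and() and validates it via phy_check_valid(), but the MAC-side contract is unchanged. A minimal, hypothetical caller with PHY clock-stop enabled:

```c
#include <linux/phy.h>

/* Returns true if EEE can be used at the currently negotiated speed/duplex. */
static bool example_eee_usable(struct phy_device *phydev)
{
	return phy_init_eee(phydev, true) >= 0;
}
```
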
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 23ee3967c166..0904002b19a2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_ports_array);
static const int phy_fibre_port_array[] = {
ETHTOOL_LINK_MODE_FIBRE_BIT,
};
+EXPORT_SYMBOL_GPL(phy_fibre_port_array);
static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
@@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
+EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
-static const int phy_10_100_features_array[] = {
+const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-static const int phy_basic_t1_features_array[] = {
+const int phy_basic_t1_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-static const int phy_gbit_features_array[] = {
+const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-static const int phy_10gbit_features_array[] = {
+const int phy_10gbit_features_array[1] = {
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
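
Exporting these arrays lets drivers assemble their own feature bitmaps with linkmode_set_bit_array() — the same helper this patch uses in genphy_config_init() and __set_phy_supported() below — instead of relying only on the fixed PHY_*_FEATURES masks. A sketch with hypothetical names:

```c
#include <linux/kernel.h>
#include <linux/linkmode.h>
#include <linux/phy.h>

/* Zero-initialized as a static; filled once at driver init time. */
static __ETHTOOL_DECLARE_LINK_MODE_MASK(example_features);

static void example_features_init(void)
{
	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       example_features);
	linkmode_set_bit_array(phy_gbit_features_array,
			       ARRAY_SIZE(phy_gbit_features_array),
			       example_features);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, example_features);
}
```
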
@@ -587,7 +594,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
- INIT_WORK(&dev->phy_queue, phy_change_work);
/* Request the appropriate module unconditionally; don't
* bother trying to do so only if it isn't already loaded,
@@ -1442,8 +1448,13 @@ static int genphy_config_advert(struct phy_device *phydev)
int err, changed = 0;
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ phydev->advertising))
+ phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ phydev->advertising);
/* Setup standard advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -1482,10 +1493,11 @@ static int genphy_config_advert(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
- }
if (adv != oldadv)
changed = 1;
@@ -1690,11 +1702,13 @@ int genphy_read_status(struct phy_device *phydev)
if (err)
return err;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
if (AUTONEG_ENABLE == phydev->autoneg) {
- if (phydev->supported & (SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported)) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
@@ -1711,8 +1725,8 @@ int genphy_read_status(struct phy_device *phydev)
return -ENOLINK;
}
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
common_adv_gb = lpagb & adv << 2;
}
@@ -1720,7 +1734,7 @@ int genphy_read_status(struct phy_device *phydev)
if (lpa < 0)
return lpa;
- phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
@@ -1801,11 +1815,13 @@ EXPORT_SYMBOL(genphy_soft_reset);
int genphy_config_init(struct phy_device *phydev)
{
int val;
- u32 features;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
- features = (SUPPORTED_TP | SUPPORTED_MII
- | SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
@@ -1813,16 +1829,16 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
@@ -1830,13 +1846,15 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ features);
if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ features);
}
- phydev->supported &= features;
- phydev->advertising &= features;
+ linkmode_and(phydev->supported, phydev->supported, features);
+ linkmode_and(phydev->advertising, phydev->advertising, features);
return 0;
}
@@ -1880,20 +1898,37 @@ EXPORT_SYMBOL(genphy_loopback);
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
- PHY_10BT_FEATURES);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds) = { 0, };
+
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ speeds);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ speeds);
+
+ linkmode_andnot(phydev->supported, phydev->supported, speeds);
switch (max_speed) {
default:
return -ENOTSUPP;
case SPEED_1000:
- phydev->supported |= PHY_1000BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported);
/* fall through */
case SPEED_100:
- phydev->supported |= PHY_100BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported);
/* fall through */
case SPEED_10:
- phydev->supported |= PHY_10BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported);
}
return 0;
@@ -1907,7 +1942,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
if (err)
return err;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
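
phy_set_max_speed() keeps its interface; only the internal masking moved to link-mode bitmaps. A short, hypothetical caller clamping a PHY to 100 Mbit/s:

```c
#include <linux/phy.h>

static int example_limit_speed(struct phy_device *phydev)
{
	int err = phy_set_max_speed(phydev, SPEED_100);

	if (err)
		phydev_err(phydev, "failed to limit speed: %d\n", err);
	return err;
}
```
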
@@ -1924,10 +1959,8 @@ EXPORT_SYMBOL(phy_set_max_speed);
*/
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
{
- WARN_ON(link_mode > 31);
-
- phydev->supported &= ~BIT(link_mode);
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(link_mode, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_remove_link_mode);
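
Dropping the WARN_ON means phy_remove_link_mode() now accepts any ETHTOOL_LINK_MODE_*_BIT index, including modes above bit 31. A one-line usage sketch for a MAC or DSA driver that cannot do half-duplex gigabit:

```c
#include <linux/phy.h>

static void example_trim_modes(struct phy_device *phydev)
{
	/* Remove the mode from supported and mirror it into advertising. */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
}
```
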
@@ -1940,9 +1973,9 @@ EXPORT_SYMBOL(phy_remove_link_mode);
*/
void phy_support_sym_pause(struct phy_device *phydev)
{
- phydev->supported &= ~SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_sym_pause);
@@ -1954,8 +1987,9 @@ EXPORT_SYMBOL(phy_support_sym_pause);
*/
void phy_support_asym_pause(struct phy_device *phydev)
{
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_asym_pause);
@@ -1973,12 +2007,13 @@ EXPORT_SYMBOL(phy_support_asym_pause);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg)
{
- phydev->supported &= ~SUPPORTED_Pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
if (rx && tx && autoneg)
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_set_sym_pause);
@@ -1995,20 +2030,29 @@ EXPORT_SYMBOL(phy_set_sym_pause);
*/
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
{
- u16 oldadv = phydev->advertising;
- u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
- if (rx)
- newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- if (tx)
- newadv ^= SUPPORTED_Asym_Pause;
+ linkmode_copy(oldadv, phydev->advertising);
- if (oldadv != newadv) {
- phydev->advertising = newadv;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
- if (phydev->autoneg)
- phy_start_aneg(phydev);
+ if (rx) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
}
+
+ if (tx)
+ linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ if (!linkmode_equal(oldadv, phydev->advertising) &&
+ phydev->autoneg)
+ phy_start_aneg(phydev);
}
EXPORT_SYMBOL(phy_set_asym_pause);
@@ -2024,8 +2068,10 @@ EXPORT_SYMBOL(phy_set_asym_pause);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp)
{
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported) ||
+ (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported) &&
pp->rx_pause != pp->tx_pause))
return false;
return true;
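
The pause helpers now test and flip individual Pause/Asym_Pause bits. A sketch of the usual ethtool set_pauseparam path built on phy_validate_pause() and phy_set_asym_pause(); the wrapper itself is hypothetical:

```c
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_set_pauseparam(struct phy_device *phydev,
				  struct ethtool_pauseparam *pp)
{
	/* Reject combinations the PHY cannot advertise. */
	if (!phy_validate_pause(phydev, pp))
		return -EINVAL;

	/* Update the advertisement; renegotiates if anything changed. */
	phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);
	return 0;
}
```
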
@@ -2074,6 +2120,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->ack_interrupt;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -2095,8 +2146,7 @@ static int phy_probe(struct device *dev)
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
- if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -2109,9 +2159,9 @@ static int phy_probe(struct device *dev)
* or both of these values
*/
ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
- phydev->supported = features;
+ linkmode_copy(phydev->supported, phydrv->features);
of_set_phy_supported(phydev);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Get the EEE modes we want to prohibit. We will ask
* the PHY stop advertising these mode later on
@@ -2131,14 +2181,22 @@ static int phy_probe(struct device *dev)
*/
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
- phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydrv->features))
- phydev->supported |= SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
} else {
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
}
/* Set the state to READY by default */
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1bf5c4..263385b75bba 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
- size_t size, char *suffix)
+ size_t size, const char *suffix)
{
snprintf(buf, size, PHY_ID_FMT ":%s",
phy->mdio.bus->id, phy->mdio.addr, suffix);
@@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
unsigned int speed)
{
- char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE];
-
plt->speed = speed;
-
- if (speed < SPEED_1000)
- snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed);
- else if (speed == SPEED_2500)
- snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps");
- else
- snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
- DIV_ROUND_CLOSEST(speed, 1000));
-
phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
- name_suffix);
+ phy_speed_to_str(speed));
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9b8dd0d0ee42..e7becc7379d7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_validate(pl, pl->supported, &pl->link_config);
s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, true);
+ pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
if (s) {
@@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- u32 advertising;
int ret;
memset(&config, 0, sizeof(config));
- ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
- ethtool_convert_legacy_u32_to_link_mode(config.advertising,
- phy->advertising);
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = pl->link_config.interface;
/*
@@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
linkmode_copy(pl->link_config.advertising, config.advertising);
/* Restrict the phy advertisement according to the MAC support. */
- ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising);
- phy->advertising = advertising;
+ linkmode_copy(phy->advertising, config.advertising);
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
netdev_dbg(pl->netdev,
- "phy: setting supported %*pb advertising 0x%08x\n",
+ "phy: setting supported %*pb advertising %*pb\n",
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
- phy->advertising);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
phy_start_machine(phy);
if (phy->irq > 0)
@@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, false);
+ pl->supported, false);
if (!s)
return -EINVAL;
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 889a4dce1648..cfe2313dbefd 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { {
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = qs6612_config_init,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 271e8adc39f1..c6010fb1aa0f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
static struct phy_driver realtek_drvs[] = {
{
- .phy_id = 0x00008201,
+ PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .phy_id_mask = 0x0000ffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
}, {
- .phy_id = 0x001cc816,
+ PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc910,
+ PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc912,
+ PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
@@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = {
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
}, {
- .phy_id = 0x001cc913,
+ PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc914,
+ PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc915,
+ PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc916,
+ PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc961,
+ PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8366rb_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = {
module_phy_driver(realtek_drvs);
-static struct mdio_device_id __maybe_unused realtek_tbl[] = {
- { 0x001cc816, 0x001fffff },
- { 0x001cc910, 0x001fffff },
- { 0x001cc912, 0x001fffff },
- { 0x001cc913, 0x001fffff },
- { 0x001cc914, 0x001fffff },
- { 0x001cc915, 0x001fffff },
- { 0x001cc916, 0x001fffff },
- { 0x001cc961, 0x001fffff },
+static const struct mdio_device_id __maybe_unused realtek_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x001cc800) },
{ }
};
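
PHY_ID_MATCH_EXACT()/PHY_ID_MATCH_VENDOR() fill .phy_id and .phy_id_mask in one go — roughly: exact match of all 32 ID bits, model match ignoring the 4-bit revision field, vendor match on the OUI part only (paraphrased from linux/phy.h of this era, so treat the bit widths as approximate). A sketch of an MDIO device table using the model-level variant; the ID is only illustrative:

```c
#include <linux/module.h>
#include <linux/phy.h>

static const struct mdio_device_id __maybe_unused example_tbl[] = {
	{ PHY_ID_MATCH_MODEL(0x001cc910) },	/* one model, any revision */
	{ }
};

MODULE_DEVICE_TABLE(mdio, example_tbl);
```
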
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c328208388da..f9477ff55545 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN83C185",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8187",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8700",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN911x Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
@@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8740",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 2fe9a87b55b5..33d733684f5b 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
@@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xffffffff,
.name = "STe100p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 55f48ee3595a..1e4fc42e4629 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev)
return phy_state;
phydev->link = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev)
if (phy_state < 0)
return phy_state;
- phydev->lp_advertising
- = mii_lpa_to_ethtool_lpa_t(phy_state);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
+ phy_state);
if (phydev->duplex == DUPLEX_FULL) {
if (phy_state & LPA_PAUSE_CAP)
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index fbf9ad429593..0646af458f6a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -70,7 +70,6 @@
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8572 0x000704d0
-#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC7385 0x00070450
#define PHY_ID_VSC7388 0x00070480
@@ -303,7 +302,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
phydev->drv->phy_id == PHY_ID_VSC8244 ||
phydev->drv->phy_id == PHY_ID_VSC8514 ||
phydev->drv->phy_id == PHY_ID_VSC8572 ||
- phydev->drv->phy_id == PHY_ID_VSC8574 ||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
MII_VSC8221_IMASK_MASK);
@@ -399,7 +397,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8234",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -409,7 +406,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -419,7 +415,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8514",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -429,17 +424,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8572",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
-}, {
- .phy_id = PHY_ID_VSC8574,
- .name = "Vitesse VSC8574",
- .phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -449,7 +433,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8601_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -494,7 +477,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -505,7 +487,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -515,7 +496,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8211",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -528,7 +508,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
- { PHY_ID_VSC8574, 0x000ffff0 },
{ PHY_ID_VSC7385, 0x000ffff0 },
{ PHY_ID_VSC7388, 0x000ffff0 },
{ PHY_ID_VSC7395, 0x000ffff0 },
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e244f5d7512a..56575f88d1fd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -188,6 +188,11 @@ struct tun_file {
struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+ struct page *page;
+ int count;
+};
+
struct tun_flow_entry {
struct hlist_node hash_link;
struct rcu_head rcu;
@@ -1473,23 +1478,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- struct page_frag *pfrag = &current->task_frag;
size_t fragsz = it->iov[i].iov_len;
+ struct page *page;
+ void *frag;
if (fragsz == 0 || fragsz > PAGE_SIZE) {
err = -EINVAL;
goto free;
}
-
- if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+ frag = netdev_alloc_frag(fragsz);
+ if (!frag) {
err = -ENOMEM;
goto free;
}
-
- skb_fill_page_desc(skb, i - 1, pfrag->page,
- pfrag->offset, fragsz);
- page_ref_inc(pfrag->page);
- pfrag->offset += fragsz;
+ page = virt_to_head_page(frag);
+ skb_fill_page_desc(skb, i - 1, page,
+ frag - page_address(page), fragsz);
}
return skb;
@@ -2381,9 +2385,16 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+ if (tpage->page)
+ __page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp, int *flush)
+ struct xdp_buff *xdp, int *flush,
+ struct tun_page *tpage)
{
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
@@ -2394,6 +2405,7 @@ static int tun_xdp_one(struct tun_struct *tun,
int buflen = hdr->buflen;
int err = 0;
bool skb_xdp = false;
+ struct page *page;
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
@@ -2420,7 +2432,14 @@ static int tun_xdp_one(struct tun_struct *tun,
case XDP_PASS:
break;
default:
- put_page(virt_to_head_page(xdp->data));
+ page = virt_to_head_page(xdp->data);
+ if (tpage->page == page) {
+ ++tpage->count;
+ } else {
+ tun_put_page(tpage);
+ tpage->page = page;
+ tpage->count = 1;
+ }
return 0;
}
}
@@ -2452,7 +2471,8 @@ build:
goto out;
}
- if (!rcu_dereference(tun->steering_prog))
+ if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+ !tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
skb_record_rx_queue(skb, tfile->queue_index);
@@ -2484,15 +2504,18 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -EBADFD;
if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ struct tun_page tpage;
int n = ctl->num;
int flush = 0;
+ memset(&tpage, 0, sizeof(tpage));
+
local_bh_disable();
rcu_read_lock();
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush);
+ tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
}
if (flush)
@@ -2501,6 +2524,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
rcu_read_unlock();
local_bh_enable();
+ tun_put_page(&tpage);
+
ret = total_len;
goto out;
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 418b0904cecb..e5fb8ef2d815 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -613,4 +613,16 @@ config USB_NET_CH9200
To compile this driver as a module, choose M here: the
module will be called ch9200.
+config USB_NET_AQC111
+ tristate "Aquantia AQtion USB to 5/2.5GbE Controllers support"
+ depends on USB_USBNET
+ select CRC32
+ default y
+ help
+ This option adds support for Aquantia AQtion USB
+ Ethernet adapters based on AQC111U/AQC112 chips.
+
+ This driver should work with at least the following devices:
+ * Aquantia AQtion USB to 5GbE
+
endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 27307a4ab003..99fd12be2111 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
+obj-$(CONFIG_USB_NET_AQC111) += aqc111.o
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
new file mode 100644
index 000000000000..f69d566bd523
--- /dev/null
+++ b/drivers/net/usb/aqc111.c
@@ -0,0 +1,1457 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+ * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2002-2003 TiVo Inc.
+ * Copyright (C) 2017-2018 ASIX
+ * Copyright (C) 2018 Aquantia Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/linkmode.h>
+
+#include "aqc111.h"
+
+#define DRIVER_NAME "aqc111"
+
+static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, ret);
+
+ return ret;
+}
+
+static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, ret);
+
+ return ret;
+}
+
+static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ int ret = 0;
+
+ ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data);
+ le16_to_cpus(data);
+
+ return ret;
+}
+
+static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ int ret = 0;
+
+ ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data);
+ le16_to_cpus(data);
+
+ return ret;
+}
+
+static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, u16 size, const void *data)
+{
+ int err = -ENOMEM;
+ void *buf = NULL;
+
+ netdev_dbg(dev->net,
+ "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n",
+ __func__, cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ (cmd == AQ_PHY_POWER) ? AQ_USB_PHY_SET_TIMEOUT :
+ AQ_USB_SET_TIMEOUT);
+
+ if (unlikely(err < 0))
+ netdev_warn(dev->net,
+ "Failed to write(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, err);
+ kfree(buf);
+
+out:
+ return err;
+}
+
+static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, size, data);
+
+ return ret;
+}
+
+static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+
+ ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, size, data);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u32 *data)
+{
+ u32 tmp = *data;
+
+ cpu_to_le32s(&tmp);
+
+ return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u32 *data)
+{
+ u32 tmp = *data;
+
+ cpu_to_le32s(&tmp);
+
+ return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data,
+ size);
+}
+
+static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd_async(dev, cmd, value, index,
+ sizeof(tmp), &tmp);
+}
+
+static void aqc111_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
+ aqc111_data->fw_ver.major,
+ aqc111_data->fw_ver.minor,
+ aqc111_data->fw_ver.rev);
+ info->eedump_len = 0x00;
+ info->regdump_len = 0x00;
+}
+
+static void aqc111_get_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+
+ if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int aqc111_set_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ if (wolinfo->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ aqc111_data->wol_flags = 0;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ aqc111_data->wol_flags |= AQ_WOL_FLAG_MP;
+
+ return 0;
+}
+
+static void aqc111_speed_to_link_mode(u32 speed,
+ struct ethtool_link_ksettings *elk)
+{
+ switch (speed) {
+ case SPEED_5000:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 5000baseT_Full);
+ break;
+ case SPEED_2500:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 2500baseT_Full);
+ break;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 1000baseT_Full);
+ break;
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 100baseT_Full);
+ break;
+ }
+}
+
+static int aqc111_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *elk)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u32 speed = SPEED_UNKNOWN;
+
+ ethtool_link_ksettings_zero_link_mode(elk, supported);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 1000baseT_Full);
+ if (usb_speed == USB_SPEED_SUPER) {
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 5000baseT_Full);
+ }
+ ethtool_link_ksettings_add_link_mode(elk, supported, TP);
+ ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg);
+
+ elk->base.port = PORT_TP;
+ elk->base.transceiver = XCVR_INTERNAL;
+
+ elk->base.mdio_support = 0x00; /* Not supported */
+
+ if (aqc111_data->autoneg)
+ linkmode_copy(elk->link_modes.advertising,
+ elk->link_modes.supported);
+ else
+ aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk);
+
+ elk->base.autoneg = aqc111_data->autoneg;
+
+ switch (aqc111_data->link_speed) {
+ case AQ_INT_SPEED_5G:
+ speed = SPEED_5000;
+ break;
+ case AQ_INT_SPEED_2_5G:
+ speed = SPEED_2500;
+ break;
+ case AQ_INT_SPEED_1G:
+ speed = SPEED_1000;
+ break;
+ case AQ_INT_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ }
+ elk->base.duplex = DUPLEX_FULL;
+ elk->base.speed = speed;
+
+ return 0;
+}
+
+static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
+ aqc111_data->phy_cfg |= AQ_PAUSE;
+ aqc111_data->phy_cfg |= AQ_ASYM_PAUSE;
+ aqc111_data->phy_cfg |= AQ_DOWNSHIFT;
+ aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK;
+ aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) &
+ AQ_DSH_RETRIES_MASK;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ switch (speed) {
+ case SPEED_5000:
+ aqc111_data->phy_cfg |= AQ_ADV_5G;
+ /* fall-through */
+ case SPEED_2500:
+ aqc111_data->phy_cfg |= AQ_ADV_2G5;
+ /* fall-through */
+ case SPEED_1000:
+ aqc111_data->phy_cfg |= AQ_ADV_1G;
+ /* fall-through */
+ case SPEED_100:
+ aqc111_data->phy_cfg |= AQ_ADV_100M;
+ /* fall-through */
+ }
+ } else {
+ switch (speed) {
+ case SPEED_5000:
+ aqc111_data->phy_cfg |= AQ_ADV_5G;
+ break;
+ case SPEED_2500:
+ aqc111_data->phy_cfg |= AQ_ADV_2G5;
+ break;
+ case SPEED_1000:
+ aqc111_data->phy_cfg |= AQ_ADV_1G;
+ break;
+ case SPEED_100:
+ aqc111_data->phy_cfg |= AQ_ADV_100M;
+ break;
+ }
+ }
+
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg);
+}
+
+static int aqc111_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *elk)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u8 autoneg = elk->base.autoneg;
+ u32 speed = elk->base.speed;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (aqc111_data->autoneg != AUTONEG_ENABLE) {
+ aqc111_data->autoneg = AUTONEG_ENABLE;
+ aqc111_data->advertised_speed =
+ (usb_speed == USB_SPEED_SUPER) ?
+ SPEED_5000 : SPEED_1000;
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+ }
+ } else {
+ if (speed != SPEED_100 &&
+ speed != SPEED_1000 &&
+ speed != SPEED_2500 &&
+ speed != SPEED_5000 &&
+ speed != SPEED_UNKNOWN)
+ return -EINVAL;
+
+ if (elk->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000)
+ return -EINVAL;
+
+ aqc111_data->autoneg = AUTONEG_DISABLE;
+ if (speed != SPEED_UNKNOWN)
+ aqc111_data->advertised_speed = speed;
+
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops aqc111_ethtool_ops = {
+ .get_drvinfo = aqc111_get_drvinfo,
+ .get_wol = aqc111_get_wol,
+ .set_wol = aqc111_set_wol,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = aqc111_get_link_ksettings,
+ .set_link_ksettings = aqc111_set_link_ksettings
+};
+
+static int aqc111_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 reg16 = 0;
+ u8 buf[5];
+
+ net->mtu = new_mtu;
+ dev->hard_mtu = net->mtu + net->hard_header_len;
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ if (net->mtu > 1500)
+ reg16 |= SFR_MEDIUM_JUMBO_EN;
+ else
+ reg16 &= ~SFR_MEDIUM_JUMBO_EN;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) {
+ memcpy(buf, &AQC111_BULKIN_SIZE[2], 5);
+ /* RX bulk configuration */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
+ 5, 5, buf);
+ }
+
+ /* Set high low water level */
+ if (dev->net->mtu <= 4500)
+ reg16 = 0x0810;
+ else if (dev->net->mtu <= 9500)
+ reg16 = 0x1020;
+ else if (dev->net->mtu <= 12500)
+ reg16 = 0x1420;
+ else if (dev->net->mtu <= 16334)
+ reg16 = 0x1A20;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
+ 2, &reg16);
+
+ return 0;
+}
+
+static int aqc111_set_mac_addr(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ int ret = 0;
+
+ ret = eth_mac_addr(net, p);
+ if (ret < 0)
+ return ret;
+
+ /* Set the MAC address */
+ return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
+ ETH_ALEN, net->dev_addr);
+}
+
+static int aqc111_vlan_rx_kill_vid(struct net_device *net,
+ __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 vlan_ctrl = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ vlan_ctrl = reg8;
+
+ /* Address */
+ reg8 = (vid / 16);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
+ /* Data */
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg16 &= ~(1 << (vid % 16));
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+
+ return 0;
+}
+
+static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 vlan_ctrl = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ vlan_ctrl = reg8;
+
+ /* Address */
+ reg8 = (vid / 16);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
+ /* Data */
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg16 |= (1 << (vid % 16));
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+
+ return 0;
+}
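The two VLAN helpers above index a 256-word by 16-bit VID filter table: the table word is selected by writing vid / 16 to SFR_VLAN_ID_ADDRESS, and bit vid % 16 of SFR_VLAN_ID_DATA0 is set or cleared. A minimal sketch of that indexing (illustrative only; aqc111_vid_to_filter_slot is not part of the patch):

static void aqc111_vid_to_filter_slot(u16 vid, u8 *word_addr, u16 *bit_mask)
{
	*word_addr = vid / 16;        /* value written to SFR_VLAN_ID_ADDRESS */
	*bit_mask = 1U << (vid % 16); /* bit toggled in SFR_VLAN_ID_DATA0 */
}

/* e.g. VID 100 -> word 6, bit 4; VID 4094 -> word 255, bit 14 */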
+
+static void aqc111_set_rx_mode(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ int mc_count = 0;
+
+ mc_count = netdev_mc_count(net);
+
+ aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL |
+ SFR_RX_CTL_AM);
+
+ if (net->flags & IFF_PROMISC) {
+ aqc111_data->rxctl |= SFR_RX_CTL_PRO;
+ } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) {
+ aqc111_data->rxctl |= SFR_RX_CTL_AMALL;
+ } else if (!netdev_mc_empty(net)) {
+ u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 };
+ struct netdev_hw_addr *ha = NULL;
+ u32 crc_bits = 0;
+
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ m_filter[crc_bits >> 3] |= BIT(crc_bits & 7);
+ }
+
+ aqc111_write_cmd_async(dev, AQ_ACCESS_MAC,
+ SFR_MULTI_FILTER_ARRY,
+ AQ_MCAST_FILTER_SIZE,
+ AQ_MCAST_FILTER_SIZE, m_filter);
+
+ aqc111_data->rxctl |= SFR_RX_CTL_AM;
+ }
+
+ aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+}
+
+static int aqc111_set_features(struct net_device *net,
+ netdev_features_t features)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ netdev_features_t changed = net->features ^ features;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ if (changed & NETIF_F_IP_CSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+ reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
+ 1, 1, &reg8);
+ }
+
+ if (changed & NETIF_F_IPV6_CSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+ reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
+ 1, 1, &reg8);
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);
+ if (features & NETIF_F_RXCSUM) {
+ aqc111_data->rx_checksum = 1;
+ reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6);
+ } else {
+ aqc111_data->rx_checksum = 0;
+ reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
+ }
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL,
+ 1, 1, &reg8);
+ }
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ u16 i = 0;
+
+ for (i = 0; i < 256; i++) {
+ /* Address */
+ reg8 = i;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_ADDRESS,
+ 1, 1, &reg8);
+ /* Data */
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_DATA0,
+ 2, &reg16);
+ reg8 = SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ }
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ reg8 |= SFR_VLAN_CONTROL_VFE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ } else {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ reg8 &= ~SFR_VLAN_CONTROL_VFE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops aqc111_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_change_mtu = aqc111_change_mtu,
+ .ndo_set_mac_address = aqc111_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = aqc111_set_rx_mode,
+ .ndo_set_features = aqc111_set_features,
+};
+
+static int aqc111_read_perm_mac(struct usbnet *dev)
+{
+ u8 buf[ETH_ALEN];
+ int ret;
+
+ ret = aqc111_read_cmd(dev, AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf);
+ if (ret < 0)
+ goto out;
+
+ ether_addr_copy(dev->net->perm_addr, buf);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void aqc111_read_fw_version(struct usbnet *dev,
+ struct aqc111_data *aqc111_data)
+{
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR,
+ 1, 1, &aqc111_data->fw_ver.major);
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR,
+ 1, 1, &aqc111_data->fw_ver.minor);
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV,
+ 1, 1, &aqc111_data->fw_ver.rev);
+
+ if (aqc111_data->fw_ver.major & 0x80)
+ aqc111_data->fw_ver.major &= ~0x80;
+}
+
+static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ enum usb_device_speed usb_speed = udev->speed;
+ struct aqc111_data *aqc111_data;
+ int ret;
+
+ /* Check that the vendor-specific configuration is selected */
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ usb_reset_configuration(dev->udev);
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "usbnet_get_endpoints failed");
+ return ret;
+ }
+
+ aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL);
+ if (!aqc111_data)
+ return -ENOMEM;
+
+ /* store aqc111_data pointer in device data field */
+ dev->driver_priv = aqc111_data;
+
+ /* Init the MAC address */
+ ret = aqc111_read_perm_mac(dev);
+ if (ret)
+ goto out;
+
+ ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr);
+
+ /* Set Rx urb size */
+ dev->rx_urb_size = URB_SIZE;
+
+ /* Set TX needed headroom & tailroom */
+ dev->net->needed_headroom += sizeof(u64);
+ dev->net->needed_tailroom += sizeof(u64);
+
+ dev->net->max_mtu = 16334;
+
+ dev->net->netdev_ops = &aqc111_netdev_ops;
+ dev->net->ethtool_ops = &aqc111_ethtool_ops;
+
+ if (usb_device_no_sg_constraint(dev->udev))
+ dev->can_dma_sg = 1;
+
+ dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
+ dev->net->features |= AQ_SUPPORT_FEATURE;
+ dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;
+
+ netif_set_gso_max_size(dev->net, 65535);
+
+ aqc111_read_fw_version(dev, aqc111_data);
+ aqc111_data->autoneg = AUTONEG_ENABLE;
+ aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ?
+ SPEED_5000 : SPEED_1000;
+
+ return 0;
+
+out:
+ kfree(aqc111_data);
+ return ret;
+}
+
+static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16;
+
+ /* Force bz */
+ reg16 = SFR_PHYPWR_RSTCTL_BZ;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+ reg16 = 0;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+
+ /* Power down ethernet PHY */
+ aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN;
+ aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ kfree(aqc111_data);
+}
+
+static void aqc111_status(struct usbnet *dev, struct urb *urb)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u64 *event_data = NULL;
+ int link = 0;
+
+ if (urb->actual_length < sizeof(*event_data))
+ return;
+
+ event_data = urb->transfer_buffer;
+ le64_to_cpus(event_data);
+
+ if (*event_data & AQ_LS_MASK)
+ link = 1;
+ else
+ link = 0;
+
+ aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >>
+ AQ_SPEED_SHIFT;
+ aqc111_data->link = link;
+
+ if (netif_carrier_ok(dev->net) != link)
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+}
+
+static void aqc111_configure_rx(struct usbnet *dev,
+ struct aqc111_data *aqc111_data)
+{
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u16 link_speed = 0, usb_host = 0;
+ u8 buf[5] = { 0 };
+ u8 queue_num = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ buf[0] = 0x00;
+ buf[1] = 0xF8;
+ buf[2] = 0x07;
+ switch (aqc111_data->link_speed) {
+ case AQ_INT_SPEED_5G:
+ link_speed = 5000;
+ reg8 = 0x05;
+ reg16 = 0x001F;
+ break;
+ case AQ_INT_SPEED_2_5G:
+ link_speed = 2500;
+ reg16 = 0x003F;
+ break;
+ case AQ_INT_SPEED_1G:
+ link_speed = 1000;
+ reg16 = 0x009F;
+ break;
+ case AQ_INT_SPEED_100M:
+ link_speed = 100;
+ queue_num = 1;
+ reg16 = 0x063F;
+ buf[1] = 0xFB;
+ buf[2] = 0x4;
+ break;
+ }
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf);
+
+ switch (usb_speed) {
+ case USB_SPEED_SUPER:
+ usb_host = 3;
+ break;
+ case USB_SPEED_HIGH:
+ usb_host = 2;
+ break;
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ usb_host = 1;
+ queue_num = 0;
+ break;
+ default:
+ usb_host = 0;
+ break;
+ }
+
+ if (dev->net->mtu > 12500 && dev->net->mtu <= 16334)
+ queue_num = 2; /* For Jumbo packet 16KB */
+
+ memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5);
+ /* RX bulk configuration */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf);
+
+ /* Set high low water level */
+ if (dev->net->mtu <= 4500)
+ reg16 = 0x0810;
+ else if (dev->net->mtu <= 9500)
+ reg16 = 0x1020;
+ else if (dev->net->mtu <= 12500)
+ reg16 = 0x1420;
+ else if (dev->net->mtu <= 16334)
+ reg16 = 0x1A20;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
+ 2, &reg16);
+ netdev_info(dev->net, "Link Speed %d, USB %d", link_speed, usb_host);
+}
+
+static void aqc111_configure_csum_offload(struct usbnet *dev)
+{
+ u8 reg8 = 0;
+
+ if (dev->net->features & NETIF_F_RXCSUM) {
+ reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
+ }
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);
+
+ reg8 = 0;
+ if (dev->net->features & NETIF_F_IP_CSUM)
+ reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP;
+
+ if (dev->net->features & NETIF_F_IPV6_CSUM)
+ reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+}
+
+static int aqc111_link_reset(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ if (aqc111_data->link == 1) { /* Link up */
+ aqc111_configure_rx(dev, aqc111_data);
+
+ /* Vlan Tag Filter */
+ reg8 = SFR_VLAN_CONTROL_VSO;
+ if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ reg8 |= SFR_VLAN_CONTROL_VFE;
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8);
+
+ reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB;
+ aqc111_data->rxctl = reg16;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ reg16 = 0;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_configure_csum_offload(dev);
+
+ aqc111_set_rx_mode(dev->net);
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ if (dev->net->mtu > 1500)
+ reg16 |= SFR_MEDIUM_JUMBO_EN;
+
+ reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN |
+ SFR_MEDIUM_TXFLOW_CTRLEN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_data->rxctl |= SFR_RX_CTL_START;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+
+ netif_carrier_on(dev->net);
+ } else {
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_data->rxctl &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+
+ reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ netif_carrier_off(dev->net);
+ }
+ return 0;
+}
+
+static int aqc111_reset(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u8 reg8 = 0;
+
+ dev->rx_urb_size = URB_SIZE;
+
+ if (usb_device_no_sg_constraint(dev->udev))
+ dev->can_dma_sg = 1;
+
+ dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
+ dev->net->features |= AQ_SUPPORT_FEATURE;
+ dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;
+
+ /* Power up ethernet PHY */
+ aqc111_data->phy_cfg = AQ_PHY_POWER_EN;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ /* Set the MAC address */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
+ ETH_ALEN, dev->net->dev_addr);
+
+ reg8 = 0xFF;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8);
+
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8);
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);
+ reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC |
+ SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF |
+ SFR_MONITOR_MODE_RW_FLAG);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);
+
+ netif_carrier_off(dev->net);
+
+ /* Phy advertise */
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+
+ return 0;
+}
+
+static int aqc111_stop(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16 = 0;
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 = 0;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ /* Put PHY to low power */
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc)
+{
+ u32 pkt_type = 0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ /* checksum error bit is set */
+ if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR)
+ return;
+
+ pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK;
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ struct sk_buff *new_skb = NULL;
+ u32 pkt_total_offset = 0;
+ u64 *pkt_desc_ptr = NULL;
+ u32 start_of_descs = 0;
+ u32 desc_offset = 0; /* RX Header Offset */
+ u16 pkt_count = 0;
+ u64 desc_hdr = 0;
+ u16 vlan_tag = 0;
+ u32 skb_len = 0;
+
+ if (!skb)
+ goto err;
+
+ if (skb->len == 0)
+ goto err;
+
+ skb_len = skb->len;
+ /* RX Descriptor Header */
+ skb_trim(skb, skb->len - sizeof(desc_hdr));
+ desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
+
+ /* Check these packets */
+ desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >>
+ AQ_RX_DH_DESC_OFFSET_SHIFT;
+ pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK;
+ start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr));
+
+ /* self check descs position */
+ if (start_of_descs != desc_offset)
+ goto err;
+
+ /* self check desc_offset from header*/
+ if (desc_offset >= skb_len)
+ goto err;
+
+ if (pkt_count == 0)
+ goto err;
+
+ /* Get the first RX packet descriptor */
+ pkt_desc_ptr = (u64 *)(skb->data + desc_offset);
+
+ while (pkt_count--) {
+ u64 pkt_desc = le64_to_cpup(pkt_desc_ptr);
+ u32 pkt_len_with_padd = 0;
+ u32 pkt_len = 0;
+
+ pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >>
+ AQ_RX_PD_LEN_SHIFT);
+ pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8);
+
+ pkt_total_offset += pkt_len_with_padd;
+ if (pkt_total_offset > desc_offset ||
+ (pkt_count == 0 && pkt_total_offset != desc_offset)) {
+ goto err;
+ }
+
+ if (pkt_desc & AQ_RX_PD_DROP ||
+ !(pkt_desc & AQ_RX_PD_RX_OK) ||
+ pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) {
+ skb_pull(skb, pkt_len_with_padd);
+ /* Next RX Packet Descriptor */
+ pkt_desc_ptr++;
+ continue;
+ }
+
+ /* Clone SKB */
+ new_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!new_skb)
+ goto err;
+
+ new_skb->len = pkt_len;
+ skb_pull(new_skb, AQ_RX_HW_PAD);
+ skb_set_tail_pointer(new_skb, new_skb->len);
+
+ new_skb->truesize = SKB_TRUESIZE(new_skb->len);
+ if (aqc111_data->rx_checksum)
+ aqc111_rx_checksum(new_skb, pkt_desc);
+
+ if (pkt_desc & AQ_RX_PD_VLAN) {
+ vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT;
+ __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
+ vlan_tag & VLAN_VID_MASK);
+ }
+
+ usbnet_skb_return(dev, new_skb);
+ if (pkt_count == 0)
+ break;
+
+ skb_pull(skb, pkt_len_with_padd);
+
+ /* Next RX Packet Header */
+ pkt_desc_ptr++;
+
+ new_skb = NULL;
+ }
+
+ return 1;
+
+err:
+ return 0;
+}
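Pieced together from the parsing above, each bulk-in URB appears to carry the packet data first, then an array of 64-bit per-packet descriptors, then a single 64-bit descriptor header at the very end. A sketch of the assumed layout, not driver code:

/*
 *  [ pkt 0, padded to 8 | ... | pkt N-1, padded to 8 ]     -> desc_offset bytes
 *  [ 64-bit pkt desc 0  | ... | 64-bit pkt desc N-1   ]    -> N * 8 bytes
 *  [ 64-bit descriptor header (pkt_count N, desc_offset) ] -> last 8 bytes
 *
 * hence the self-check: (original skb->len) - (N + 1) * 8 must equal
 * desc_offset before any packet is cloned out of the buffer.
 */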
+
+static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int frame_size = dev->maxpacket;
+ struct sk_buff *new_skb = NULL;
+ u64 *tx_desc_ptr = NULL;
+ int padding_size = 0;
+ int headroom = 0;
+ int tailroom = 0;
+ u64 tx_desc = 0;
+ u16 tci = 0;
+
+ /* Length of actual data */
+ tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK;
+
+ /* TSO MSS */
+ tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) <<
+ AQ_TX_DESC_MSS_SHIFT;
+
+ headroom = (skb->len + sizeof(tx_desc)) % 8;
+ if (headroom != 0)
+ padding_size = 8 - headroom;
+
+ if (((skb->len + sizeof(tx_desc) + padding_size) % frame_size) == 0) {
+ padding_size += 8;
+ tx_desc |= AQ_TX_DESC_DROP_PADD;
+ }
+
+ /* Vlan Tag */
+ if (vlan_get_tag(skb, &tci) >= 0) {
+ tx_desc |= AQ_TX_DESC_VLAN;
+ tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) <<
+ AQ_TX_DESC_VLAN_SHIFT;
+ }
+
+ if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) &&
+ skb_linearize(skb))
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+
+ if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) {
+ new_skb = skb_copy_expand(skb, sizeof(tx_desc),
+ padding_size, flags);
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ if (!skb)
+ return NULL;
+ }
+ if (padding_size != 0)
+ skb_put_zero(skb, padding_size);
+ /* Copy TX header */
+ tx_desc_ptr = skb_push(skb, sizeof(tx_desc));
+ *tx_desc_ptr = cpu_to_le64(tx_desc);
+
+ usbnet_set_skb_tx_stats(skb, 1, 0);
+
+ return skb;
+}
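To make the padding arithmetic above concrete, a rough worked example with an assumed 1514-byte frame (illustrative only, not driver code):

/*  sizeof(tx_desc) = 8
 *  (1514 + 8) % 8 = 2          ->  padding_size = 8 - 2 = 6
 *  1514 + 8 + 6 = 1528 bytes   ->  URB payload once the descriptor is pushed
 *  Only if that total were an exact multiple of dev->maxpacket would another
 *  8 bytes be appended and AQ_TX_DESC_DROP_PADD set in the descriptor.
 */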
+
+static const struct driver_info aqc111_info = {
+ .description = "Aquantia AQtion USB to 5GbE Controller",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#define ASIX111_DESC \
+"ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter"
+
+static const struct driver_info asix111_info = {
+ .description = ASIX111_DESC,
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#undef ASIX111_DESC
+
+#define ASIX112_DESC \
+"ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter"
+
+static const struct driver_info asix112_info = {
+ .description = ASIX112_DESC,
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#undef ASIX112_DESC
+
+static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 temp_rx_ctrl = 0x00;
+ u16 reg16;
+ u8 reg8;
+
+ usbnet_suspend(intf, message);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+ temp_rx_ctrl = reg16;
+ /* Stop RX operations */
+ reg16 &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+ /* Force bz */
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+ reg16 |= SFR_PHYPWR_RSTCTL_BZ;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK |
+ SFR_RX_CTL_AP | SFR_RX_CTL_AM);
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &temp_rx_ctrl);
+
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+
+ if (aqc111_data->wol_flags) {
+ struct aqc111_wol_cfg wol_cfg = { 0 };
+
+ aqc111_data->phy_cfg |= AQ_WOL;
+ ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr);
+ wol_cfg.flags = aqc111_data->wol_flags;
+
+ temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START);
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &temp_rx_ctrl);
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
+ 1, 1, &reg8);
+ reg8 = SFR_BMRX_DMA_EN;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
+ 1, 1, &reg8);
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+ reg8 = 0x07;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
+ 1, 1, &reg8);
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8);
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8);
+ reg8 = 0xFF;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE,
+ 1, 1, &reg8);
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG,
+ 1, 1, &reg8);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ reg16 |= SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+
+ aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0,
+ WOL_CFG_SIZE, &wol_cfg);
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+ } else {
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ /* Disable RX path */
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ }
+
+ return 0;
+}
+
+static int aqc111_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16;
+ u8 reg8;
+
+ netif_carrier_off(dev->net);
+
+ /* Power up ethernet PHY */
+ aqc111_data->phy_cfg |= AQ_PHY_POWER_EN;
+ aqc111_data->phy_cfg &= ~AQ_LOW_POWER;
+ aqc111_data->phy_cfg &= ~AQ_WOL;
+
+ reg8 = 0xFF;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
+ 1, 1, &reg8);
+ /* Configure RX control register => start operation */
+ reg16 = aqc111_data->rxctl;
+ reg16 &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ reg16 |= SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 |= SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8);
+
+ return usbnet_resume(intf);
+}
+
+#define AQC111_USB_ETH_DEV(vid, pid, table) \
+ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \
+ .driver_info = (unsigned long)&(table) \
+}, \
+{ \
+ USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \
+ USB_CLASS_COMM, \
+ USB_CDC_SUBCLASS_ETHERNET, \
+ USB_CDC_PROTO_NONE), \
+ .driver_info = (unsigned long)&(table),
+
+static const struct usb_device_id products[] = {
+ {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
+ {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
+ {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
+ { },/* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
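The AQC111_USB_ETH_DEV() macro intentionally leaves its braces unbalanced so that each products[] line emits two usb_device_id entries, one matching the vendor-specific interface class and one matching the CDC Ethernet interface. Expanded by hand, the first entry is roughly equivalent to:

{
	USB_DEVICE_INTERFACE_CLASS(0x2eca, 0xc101, USB_CLASS_VENDOR_SPEC),
	.driver_info = (unsigned long)&aqc111_info
},
{
	USB_DEVICE_AND_INTERFACE_INFO(0x2eca, 0xc101, USB_CLASS_COMM,
				      USB_CDC_SUBCLASS_ETHERNET,
				      USB_CDC_PROTO_NONE),
	.driver_info = (unsigned long)&aqc111_info,
},

This pairs with the cdc_ether.c hunk further below, which blacklists the same VID/PID pairs (driver_info = 0) so the generic CDC Ethernet driver leaves those interfaces to aqc111.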
+
+static struct usb_driver aq_driver = {
+ .name = "aqc111",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = aqc111_suspend,
+ .resume = aqc111_resume,
+ .disconnect = usbnet_disconnect,
+};
+
+module_usb_driver(aq_driver);
+
+MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h
new file mode 100644
index 000000000000..4d68b3a6067c
--- /dev/null
+++ b/drivers/net/usb/aqc111.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+ * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2002-2003 TiVo Inc.
+ * Copyright (C) 2017-2018 ASIX
+ * Copyright (C) 2018 Aquantia Corp.
+ */
+
+#ifndef __LINUX_USBNET_AQC111_H
+#define __LINUX_USBNET_AQC111_H
+
+#define URB_SIZE (1024 * 62)
+
+#define AQ_MCAST_FILTER_SIZE 8
+#define AQ_MAX_MCAST 64
+
+#define AQ_ACCESS_MAC 0x01
+#define AQ_FLASH_PARAMETERS 0x20
+#define AQ_PHY_POWER 0x31
+#define AQ_WOL_CFG 0x60
+#define AQ_PHY_OPS 0x61
+
+#define AQ_USB_PHY_SET_TIMEOUT 10000
+#define AQ_USB_SET_TIMEOUT 4000
+
+/* Feature. ********************************************/
+#define AQ_SUPPORT_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX)
+
+#define AQ_SUPPORT_HW_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_FILTER)
+
+#define AQ_SUPPORT_VLAN_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO)
+
+/* SFR Reg. ********************************************/
+
+#define SFR_GENERAL_STATUS 0x03
+#define SFR_CHIP_STATUS 0x05
+#define SFR_RX_CTL 0x0B
+ #define SFR_RX_CTL_TXPADCRC 0x0400
+ #define SFR_RX_CTL_IPE 0x0200
+ #define SFR_RX_CTL_DROPCRCERR 0x0100
+ #define SFR_RX_CTL_START 0x0080
+ #define SFR_RX_CTL_RF_WAK 0x0040
+ #define SFR_RX_CTL_AP 0x0020
+ #define SFR_RX_CTL_AM 0x0010
+ #define SFR_RX_CTL_AB 0x0008
+ #define SFR_RX_CTL_AMALL 0x0002
+ #define SFR_RX_CTL_PRO 0x0001
+ #define SFR_RX_CTL_STOP 0x0000
+#define SFR_INTER_PACKET_GAP_0 0x0D
+#define SFR_NODE_ID 0x10
+#define SFR_MULTI_FILTER_ARRY 0x16
+#define SFR_MEDIUM_STATUS_MODE 0x22
+ #define SFR_MEDIUM_XGMIIMODE 0x0001
+ #define SFR_MEDIUM_FULL_DUPLEX 0x0002
+ #define SFR_MEDIUM_RXFLOW_CTRLEN 0x0010
+ #define SFR_MEDIUM_TXFLOW_CTRLEN 0x0020
+ #define SFR_MEDIUM_JUMBO_EN 0x0040
+ #define SFR_MEDIUM_RECEIVE_EN 0x0100
+#define SFR_MONITOR_MODE 0x24
+ #define SFR_MONITOR_MODE_EPHYRW 0x01
+ #define SFR_MONITOR_MODE_RWLC 0x02
+ #define SFR_MONITOR_MODE_RWMP 0x04
+ #define SFR_MONITOR_MODE_RWWF 0x08
+ #define SFR_MONITOR_MODE_RW_FLAG 0x10
+ #define SFR_MONITOR_MODE_PMEPOL 0x20
+ #define SFR_MONITOR_MODE_PMETYPE 0x40
+#define SFR_PHYPWR_RSTCTL 0x26
+ #define SFR_PHYPWR_RSTCTL_BZ 0x0010
+ #define SFR_PHYPWR_RSTCTL_IPRL 0x0020
+#define SFR_VLAN_ID_ADDRESS 0x2A
+#define SFR_VLAN_ID_CONTROL 0x2B
+ #define SFR_VLAN_CONTROL_WE 0x0001
+ #define SFR_VLAN_CONTROL_RD 0x0002
+ #define SFR_VLAN_CONTROL_VSO 0x0010
+ #define SFR_VLAN_CONTROL_VFE 0x0020
+#define SFR_VLAN_ID_DATA0 0x2C
+#define SFR_VLAN_ID_DATA1 0x2D
+#define SFR_RX_BULKIN_QCTRL 0x2E
+ #define SFR_RX_BULKIN_QCTRL_TIME 0x01
+ #define SFR_RX_BULKIN_QCTRL_IFG 0x02
+ #define SFR_RX_BULKIN_QCTRL_SIZE 0x04
+#define SFR_RX_BULKIN_QTIMR_LOW 0x2F
+#define SFR_RX_BULKIN_QTIMR_HIGH 0x30
+#define SFR_RX_BULKIN_QSIZE 0x31
+#define SFR_RX_BULKIN_QIFG 0x32
+#define SFR_RXCOE_CTL 0x34
+ #define SFR_RXCOE_IP 0x01
+ #define SFR_RXCOE_TCP 0x02
+ #define SFR_RXCOE_UDP 0x04
+ #define SFR_RXCOE_ICMP 0x08
+ #define SFR_RXCOE_IGMP 0x10
+ #define SFR_RXCOE_TCPV6 0x20
+ #define SFR_RXCOE_UDPV6 0x40
+ #define SFR_RXCOE_ICMV6 0x80
+#define SFR_TXCOE_CTL 0x35
+ #define SFR_TXCOE_IP 0x01
+ #define SFR_TXCOE_TCP 0x02
+ #define SFR_TXCOE_UDP 0x04
+ #define SFR_TXCOE_ICMP 0x08
+ #define SFR_TXCOE_IGMP 0x10
+ #define SFR_TXCOE_TCPV6 0x20
+ #define SFR_TXCOE_UDPV6 0x40
+ #define SFR_TXCOE_ICMV6 0x80
+#define SFR_BM_INT_MASK 0x41
+#define SFR_BMRX_DMA_CONTROL 0x43
+ #define SFR_BMRX_DMA_EN 0x80
+#define SFR_BMTX_DMA_CONTROL 0x46
+#define SFR_PAUSE_WATERLVL_LOW 0x54
+#define SFR_PAUSE_WATERLVL_HIGH 0x55
+#define SFR_ARC_CTRL 0x9E
+#define SFR_SWP_CTRL 0xB1
+#define SFR_TX_PAUSE_RESEND_T 0xB2
+#define SFR_ETH_MAC_PATH 0xB7
+ #define SFR_RX_PATH_READY 0x01
+#define SFR_BULK_OUT_CTRL 0xB9
+ #define SFR_BULK_OUT_FLUSH_EN 0x01
+ #define SFR_BULK_OUT_EFF_EN 0x02
+
+#define AQ_FW_VER_MAJOR 0xDA
+#define AQ_FW_VER_MINOR 0xDB
+#define AQ_FW_VER_REV 0xDC
+
+/*PHY_OPS**********************************************************************/
+
+#define AQ_ADV_100M BIT(0)
+#define AQ_ADV_1G BIT(1)
+#define AQ_ADV_2G5 BIT(2)
+#define AQ_ADV_5G BIT(3)
+#define AQ_ADV_MASK 0x0F
+
+#define AQ_PAUSE BIT(16)
+#define AQ_ASYM_PAUSE BIT(17)
+#define AQ_LOW_POWER BIT(18)
+#define AQ_PHY_POWER_EN BIT(19)
+#define AQ_WOL BIT(20)
+#define AQ_DOWNSHIFT BIT(21)
+
+#define AQ_DSH_RETRIES_SHIFT 0x18
+#define AQ_DSH_RETRIES_MASK 0xF000000
+
+#define AQ_WOL_FLAG_MP 0x2
+
+/******************************************************************************/
+
+struct aqc111_wol_cfg {
+ u8 hw_addr[6];
+ u8 flags;
+ u8 rsvd[283];
+} __packed;
+
+#define WOL_CFG_SIZE sizeof(struct aqc111_wol_cfg)
+
+struct aqc111_data {
+ u16 rxctl;
+ u8 rx_checksum;
+ u8 link_speed;
+ u8 link;
+ u8 autoneg;
+ u32 advertised_speed;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 rev;
+ } fw_ver;
+ u32 phy_cfg;
+ u8 wol_flags;
+};
+
+#define AQ_LS_MASK 0x8000
+#define AQ_SPEED_MASK 0x7F00
+#define AQ_SPEED_SHIFT 0x0008
+#define AQ_INT_SPEED_5G 0x000F
+#define AQ_INT_SPEED_2_5G 0x0010
+#define AQ_INT_SPEED_1G 0x0011
+#define AQ_INT_SPEED_100M 0x0013
+
+/* TX Descriptor */
+#define AQ_TX_DESC_LEN_MASK 0x1FFFFF
+#define AQ_TX_DESC_DROP_PADD BIT(28)
+#define AQ_TX_DESC_VLAN BIT(29)
+#define AQ_TX_DESC_MSS_MASK 0x7FFF
+#define AQ_TX_DESC_MSS_SHIFT 0x20
+#define AQ_TX_DESC_VLAN_MASK 0xFFFF
+#define AQ_TX_DESC_VLAN_SHIFT 0x30
+
+#define AQ_RX_HW_PAD 0x02
+
+/* RX Packet Descriptor */
+#define AQ_RX_PD_L4_ERR BIT(0)
+#define AQ_RX_PD_L3_ERR BIT(1)
+#define AQ_RX_PD_L4_TYPE_MASK 0x1C
+#define AQ_RX_PD_L4_UDP 0x04
+#define AQ_RX_PD_L4_TCP 0x10
+#define AQ_RX_PD_L3_TYPE_MASK 0x60
+#define AQ_RX_PD_L3_IP 0x20
+#define AQ_RX_PD_L3_IP6 0x40
+
+#define AQ_RX_PD_VLAN BIT(10)
+#define AQ_RX_PD_RX_OK BIT(11)
+#define AQ_RX_PD_DROP BIT(31)
+#define AQ_RX_PD_LEN_MASK 0x7FFF0000
+#define AQ_RX_PD_LEN_SHIFT 0x10
+#define AQ_RX_PD_VLAN_SHIFT 0x20
+
+/* RX Descriptor header */
+#define AQ_RX_DH_PKT_CNT_MASK 0x1FFF
+#define AQ_RX_DH_DESC_OFFSET_MASK 0xFFFFE000
+#define AQ_RX_DH_DESC_OFFSET_SHIFT 0x0D
+
+static struct {
+ unsigned char ctrl;
+ unsigned char timer_l;
+ unsigned char timer_h;
+ unsigned char size;
+ unsigned char ifg;
+} AQC111_BULKIN_SIZE[] = {
+ /* xHCI & EHCI & OHCI */
+ {7, 0x00, 0x01, 0x1E, 0xFF}, /* 10G, 5G, 2.5G, 1G */
+ {7, 0xA0, 0x00, 0x14, 0x00}, /* 100M */
+ /* Jumbo packet */
+ {7, 0x00, 0x01, 0x18, 0xFF},
+};
+
+#endif /* __LINUX_USBNET_AQC111_H */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5c42cf81a08b..b3b3c05903a1 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -562,6 +562,8 @@ static const struct driver_info wwan_info = {
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
#define TPLINK_VENDOR_ID 0x2357
+#define AQUANTIA_VENDOR_ID 0x2eca
+#define ASIX_VENDOR_ID 0x0b95
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -821,6 +823,30 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2790, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter (based on AQC112U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2791, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index be1917be28f2..3c8bdac78866 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net,
dev->fc_request_control |= FLOW_CTRL_TX;
if (ecmd.base.autoneg) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
u32 mii_adv;
- u32 advertising;
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, ecmd.link_modes.advertising);
-
- advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ecmd.link_modes.advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ecmd.link_modes.advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
-
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd.link_modes.advertising, advertising);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(ecmd.link_modes.advertising, fc,
+ ecmd.link_modes.advertising);
phy_ethtool_ksettings_set(phydev, &ecmd);
}
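The reworked block above operates on ethtool link-mode bitmaps instead of the legacy u32 advertising mask, because the bitmap can carry modes beyond bit 31 that a round-trip through a u32 would silently drop. Conceptually the mask is just a multi-word bitmap; the user-space sketch below, using generic helpers rather than the kernel's linkmode API, shows the same clear-then-OR pattern (the Pause/Asym_Pause bit numbers are the kernel's, but treat them as illustrative here).

#include <stdio.h>

#define NBITS     128	/* enough room for more than 32 link modes */
#define WORD_BITS (8 * sizeof(unsigned long))
#define WORDS     ((NBITS + WORD_BITS - 1) / WORD_BITS)

typedef unsigned long linkmask[WORDS];

static void mask_clear_bit(linkmask m, unsigned bit)
{
	m[bit / WORD_BITS] &= ~(1UL << (bit % WORD_BITS));
}

static void mask_set_bit(linkmask m, unsigned bit)
{
	m[bit / WORD_BITS] |= 1UL << (bit % WORD_BITS);
}

static void mask_or(linkmask dst, const linkmask a, const linkmask b)
{
	for (unsigned i = 0; i < WORDS; i++)
		dst[i] = a[i] | b[i];
}

int main(void)
{
	linkmask adv = { 0 }, fc = { 0 };
	unsigned pause_bit = 13, asym_pause_bit = 14;

	mask_set_bit(adv, 47);		/* e.g. a mode above bit 31 survives */
	mask_clear_bit(adv, pause_bit);	/* drop the old flow-control bits ... */
	mask_clear_bit(adv, asym_pause_bit);
	mask_set_bit(fc, pause_bit);	/* ... and OR in the requested ones */
	mask_or(adv, fc, adv);

	printf("pause advertised: %lu\n",
	       (adv[pause_bit / WORD_BITS] >> (pause_bit % WORD_BITS)) & 1);
	return 0;
}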
@@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
int ret;
u32 mii_adv;
struct phy_device *phydev;
@@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
/* support both flow controls */
dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(phydev->advertising, fc, phydev->advertising);
if (phydev->mdio.dev.of_node) {
u32 reg;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb6f958..e3d08626828e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -618,9 +618,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
-
+ intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
@@ -1295,6 +1293,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
smsc95xx_init_mac_address(dev);
@@ -1933,8 +1932,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
unsigned char *packet;
u16 size;
- memcpy(&header, skb->data, sizeof(header));
- le32_to_cpus(&header);
+ header = get_unaligned_le32(skb->data);
skb_pull(skb, 4 + NET_IP_ALIGN);
packet = skb->data;
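Both smsc95xx hunks above replace the memcpy-plus-byteswap idiom with get_unaligned_le32(), which reads a 32-bit little-endian value from a possibly unaligned buffer in one step. A portable user-space equivalent of that pair of helpers looks roughly like this (the kernel versions may use arch-specific fast paths):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Read/write a 32-bit little-endian value at an arbitrary, possibly
 * unaligned address; byte-wise access keeps it both alignment-safe and
 * endian-independent.
 */
static uint32_t get_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void put_unaligned_le32(uint32_t val, void *p)
{
	uint8_t b[4] = { val, val >> 8, val >> 16, val >> 24 };

	memcpy(p, b, sizeof(b));
}

int main(void)
{
	uint8_t buf[5] = { 0 };

	put_unaligned_le32(0x11223344, buf + 1);	/* deliberately unaligned */
	printf("0x%08x\n", get_unaligned_le32(buf + 1));
	return 0;
}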
@@ -2011,12 +2009,30 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
return (high_16 << 16) | low_16;
}
+/* The TX CSUM won't work if the checksum lies in the last 4 bytes of the
+ * transmission. This is fairly unlikely; it only seems to trigger with some
+ * short TCP ACK packets.
+ *
+ * Note: this calculation should probably also check the alignment of the
+ * data, but a straight check for the csum being in the last four bytes
+ * of the packet should be OK for now.
+ */
+static bool smsc95xx_can_tx_checksum(struct sk_buff *skb)
+{
+ unsigned int len = skb->len - skb_checksum_start_offset(skb);
+
+ if (skb->len <= 45)
+ return false;
+ return skb->csum_offset < (len - (4 + 1));
+}
+
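For clarity, the predicate above fails exactly when the 2-byte checksum field would land within the last 4 bytes of the frame. A standalone restatement with a worked example (a 54-byte TCP ACK whose checksum starts at offset 34 with csum_offset 16) is:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of smsc95xx_can_tx_checksum() in plain C: the device cannot
 * offload the checksum if the 16-bit checksum field falls in the last
 * 4 bytes of the frame, or if the frame is tiny.
 */
static bool can_tx_checksum(unsigned skb_len, unsigned csum_start,
			    unsigned csum_offset)
{
	unsigned len = skb_len - csum_start;

	if (skb_len <= 45)
		return false;
	return csum_offset < len - (4 + 1);
}

int main(void)
{
	/* 54-byte TCP ACK: Ethernet 14 + IPv4 20 + TCP 20. The checksum
	 * starts at the TCP header (offset 34) with field offset 16, so
	 * it occupies bytes 50-51 of a frame that ends at byte 53, i.e.
	 * inside the last 4 bytes: not offloadable.
	 */
	printf("offloadable: %s\n",
	       can_tx_checksum(54, 34, 16) ? "yes" : "no");
	return 0;
}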
static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
/* We do not advertise SG, so skbs should be already linearized */
BUG_ON(skb_shinfo(skb)->nr_frags);
@@ -2030,8 +2046,11 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
return NULL;
}
+ tx_cmd_b = (u32)skb->len;
+ tx_cmd_a = tx_cmd_b | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+
if (csum) {
- if (skb->len <= 45) {
+ if (!smsc95xx_can_tx_checksum(skb)) {
/* workaround - hardware tx checksum does not work
* properly with extremely small packets */
long csstart = skb_checksum_start_offset(skb);
@@ -2043,24 +2062,18 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
csum = false;
} else {
u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
- skb_push(skb, 4);
- cpu_to_le32s(&csum_preamble);
- memcpy(skb->data, &csum_preamble, 4);
+ ptr = skb_push(skb, 4);
+ put_unaligned_le32(csum_preamble, ptr);
+
+ tx_cmd_a += 4;
+ tx_cmd_b += 4;
+ tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
}
}
- skb_push(skb, 4);
- tx_cmd_b = (u32)(skb->len - 4);
- if (csum)
- tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- tx_cmd_a = (u32)(skb->len - 8) | TX_CMD_A_FIRST_SEG_ |
- TX_CMD_A_LAST_SEG_;
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr+4);
return skb;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 890fa5b905e2..f412ea1cef18 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return PTR_ERR(net);
peer = rtnl_create_link(net, ifname, name_assign_type,
- &veth_link_ops, tbp);
+ &veth_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(net);
return PTR_ERR(peer);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 69b7227c637e..21ad4b1d7f03 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -981,24 +981,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
int orig_iif = skb->skb_iif;
- bool need_strict;
+ bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ bool is_ndisc = ipv6_ndisc_frame(skb);
- /* loopback traffic; do not push through packet taps again.
- * Reset pkt_type for upper layers to process skb
+ /* loopback, multicast & non-ND link-local traffic; do not push through
+ * packet taps again. Reset pkt_type for upper layers to process skb
*/
- if (skb->pkt_type == PACKET_LOOPBACK) {
+ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
- skb->pkt_type = PACKET_HOST;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->pkt_type = PACKET_HOST;
goto out;
}
- /* if packet is NDISC or addressed to multicast or link-local
- * then keep the ingress interface
- */
- need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
- if (!ipv6_ndisc_frame(skb) && !need_strict) {
+ /* if packet is NDISC then keep the ingress interface */
+ if (!is_ndisc) {
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
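rt6_need_strict() in the vrf hunk above is, to a first approximation, a test for destinations whose handling must stay tied to the ingress interface: multicast, link-local and loopback addresses. A rough user-space stand-in using the standard netinet macros (not the kernel helper itself) is:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* Approximation of rt6_need_strict(): true for addresses interpreted
 * relative to the receiving interface.
 */
static bool ipv6_needs_strict(const struct in6_addr *a)
{
	return IN6_IS_ADDR_MULTICAST(a) ||
	       IN6_IS_ADDR_LINKLOCAL(a) ||
	       IN6_IS_ADDR_LOOPBACK(a);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "fe80::1", &a);
	printf("fe80::1 strict: %d\n", ipv6_needs_strict(&a));
	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1 strict: %d\n", ipv6_needs_strict(&a));
	return 0;
}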
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 297cdeaef479..911066299a83 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -79,9 +79,11 @@ struct vxlan_fdb {
u8 eth_addr[ETH_ALEN];
u16 state; /* see ndm_state */
__be32 vni;
- u8 flags; /* see ndm_flags */
+ u16 flags; /* see ndm_flags and below */
};
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -376,6 +378,7 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
.remote_ifindex = rd->remote_ifindex,
.vni = fdb->vni,
.offloaded = rd->offloaded,
+ .added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER,
};
memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN);
@@ -384,15 +387,19 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type)
+ struct vxlan_rdst *rd, int type, bool swdev_notify)
{
- switch (type) {
- case RTM_NEWNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true);
- break;
- case RTM_DELNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false);
- break;
+ if (swdev_notify) {
+ switch (type) {
+ case RTM_NEWNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ true);
+ break;
+ case RTM_DELNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ false);
+ break;
+ }
}
__vxlan_fdb_notify(vxlan, fdb, rd, type);
@@ -409,7 +416,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
.remote_vni = cpu_to_be32(VXLAN_N_VID),
};
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -421,7 +428,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
memcpy(f.eth_addr, eth_addr, ETH_ALEN);
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}
/* Hash Ethernet address */
@@ -540,6 +547,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
fdb_info->remote_ifindex = rdst->remote_ifindex;
fdb_info->vni = vni;
fdb_info->offloaded = rdst->offloaded;
+ fdb_info->added_by_user = f->flags & NTF_VXLAN_ADDED_BY_USER;
ether_addr_copy(fdb_info->eth_addr, mac);
out:
@@ -700,7 +708,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
const u8 *mac, __u16 state,
- __be32 src_vni, __u8 ndm_flags)
+ __be32 src_vni, __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -720,7 +728,7 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
- __be32 vni, __u32 ifindex, __u8 ndm_flags,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
struct vxlan_fdb **fdb)
{
struct vxlan_rdst *rd = NULL;
@@ -756,9 +764,10 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u8 ndm_flags)
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify)
{
- __u8 fdb_flags = (ndm_flags & ~NTF_USE);
+ __u16 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
@@ -771,16 +780,24 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
"lost race to create %pM\n", mac);
return -EEXIST;
}
- if (f->state != state) {
- f->state = state;
- f->updated = jiffies;
- notify = 1;
- }
- if (f->flags != fdb_flags) {
- f->flags = fdb_flags;
- f->updated = jiffies;
- notify = 1;
+
+ /* Do not allow an externally learned entry to take over an
+ * entry added by the user.
+ */
+ if (!(fdb_flags & NTF_EXT_LEARNED) ||
+ !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+ if (f->state != state) {
+ f->state = state;
+ f->updated = jiffies;
+ notify = 1;
+ }
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
+ f->updated = jiffies;
+ notify = 1;
+ }
}
+
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
@@ -822,7 +839,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
- vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
}
return 0;
@@ -841,7 +858,7 @@ static void vxlan_fdb_free(struct rcu_head *head)
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
- bool do_notify)
+ bool do_notify, bool swdev_notify)
{
struct vxlan_rdst *rd;
@@ -851,7 +868,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
--vxlan->addrcnt;
if (do_notify)
list_for_each_entry(rd, &f->remotes, list)
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+ swdev_notify);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -866,10 +884,10 @@ static void vxlan_dst_free(struct rcu_head *head)
}
static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
- struct vxlan_rdst *rd)
+ struct vxlan_rdst *rd, bool swdev_notify)
{
list_del_rcu(&rd->list);
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
call_rcu(&rd->rcu, vxlan_dst_free);
}
@@ -968,7 +986,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
- port, src_vni, vni, ifindex, ndm->ndm_flags);
+ port, src_vni, vni, ifindex,
+ ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
+ true);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -977,7 +997,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
__be16 port, __be32 src_vni, __be32 vni,
- u32 ifindex, u16 vid)
+ u32 ifindex, bool swdev_notify)
{
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
@@ -997,11 +1017,11 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
* otherwise destroy the fdb entry
*/
if (rd && !list_is_singular(&f->remotes)) {
- vxlan_fdb_dst_destroy(vxlan, f, rd);
+ vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
goto out;
}
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, swdev_notify);
out:
return 0;
@@ -1025,7 +1045,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
spin_lock_bh(&vxlan->hash_lock);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
- vid);
+ true);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -1103,7 +1123,7 @@ static bool vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
f->updated = jiffies;
- vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
@@ -1116,7 +1136,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- ifindex, NTF_SELF);
+ ifindex, NTF_SELF, true);
spin_unlock(&vxlan->hash_lock);
}
@@ -1552,6 +1572,34 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
+static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *vs;
+ struct vxlanhdr *hdr;
+ __be32 vni;
+
+ if (skb->len < VXLAN_HLEN)
+ return -EINVAL;
+
+ hdr = vxlan_hdr(skb);
+
+ if (!(hdr->vx_flags & VXLAN_HF_VNI))
+ return -EINVAL;
+
+ vs = rcu_dereference_sk_user_data(sk);
+ if (!vs)
+ return -ENOENT;
+
+ vni = vxlan_vni(hdr->vx_vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ if (!vxlan)
+ return -ENOENT;
+
+ return 0;
+}
+
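vxlan_err_lookup() only has to peel the 8-byte VXLAN header off the offending packet, check the I (VNI-present) flag and look the VNI up. Per RFC 7348 the header is flags(8)/reserved(24)/VNI(24)/reserved(8); a user-space sketch of extracting the VNI from a raw buffer is:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VXLAN_HLEN	8
#define VXLAN_HF_VNI	0x08000000U	/* I flag, host order after ntohl() */

struct vxlanhdr {
	uint32_t vx_flags;	/* big-endian on the wire */
	uint32_t vx_vni;	/* VNI in the top 24 bits, big-endian */
};

/* Return the VNI, or -1 if the header is too short or the I flag is clear. */
static long vxlan_parse_vni(const void *buf, unsigned len)
{
	struct vxlanhdr h;

	if (len < VXLAN_HLEN)
		return -1;
	memcpy(&h, buf, sizeof(h));
	if (!(ntohl(h.vx_flags) & VXLAN_HF_VNI))
		return -1;
	return ntohl(h.vx_vni) >> 8;
}

int main(void)
{
	/* Flags with only the I bit set, VNI 0x123456, trailing reserved byte. */
	uint8_t pkt[VXLAN_HLEN] = { 0x08, 0, 0, 0, 0x12, 0x34, 0x56, 0x00 };

	printf("vni = 0x%lx\n", vxlan_parse_vni(pkt, sizeof(pkt)));
	return 0;
}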
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2250,13 +2298,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- /* Bypass encapsulation if the destination is local */
if (!info) {
+ /* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, dst,
dst_port, ifindex, vni,
&rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
+
+ if (vxlan->cfg.df == VXLAN_DF_SET) {
+ df = htons(IP_DF);
+ } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
+ (ntohs(eth->h_proto) == ETH_P_IP &&
+ old_iph->frag_off & htons(IP_DF)))
+ df = htons(IP_DF);
+ }
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
}
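The new IFLA_VXLAN_DF modes decide whether the outer IPv4 header carries the DF bit: "set" always sets it, while "inherit" copies it from an inner IPv4 packet and always sets it for inner IPv6. With DF set, an oversized frame provokes an ICMP "fragmentation needed" on the underlay rather than being fragmented, which is presumably why this series also adds the encap_err_lookup hook so the UDP stack can match such errors to the VXLAN socket. The overhead arithmetic that makes a frame "oversized" is, in a simplified IPv4-underlay sketch:

#include <stdbool.h>
#include <stdio.h>

/* Outer overhead for VXLAN over IPv4/UDP (no options, no VLAN):
 * Ethernet 14 + IPv4 20 + UDP 8 + VXLAN 8 = 50 bytes.
 */
#define VXLAN4_OVERHEAD	(14 + 20 + 8 + 8)

static bool needs_icmp_frag_needed(unsigned inner_frame_len,
				   unsigned underlay_mtu, bool df_set)
{
	/* The underlay MTU bounds the outer IP datagram (outer L2 header
	 * excluded); with DF set, exceeding it cannot be fixed by
	 * fragmentation, so the sender gets an ICMP error instead.
	 */
	unsigned outer_ip_len = inner_frame_len + VXLAN4_OVERHEAD - 14;

	return df_set && outer_ip_len > underlay_mtu;
}

int main(void)
{
	/* A full-size 1514-byte inner Ethernet frame over a 1500-byte underlay. */
	printf("icmp frag-needed: %s\n",
	       needs_icmp_frag_needed(1514, 1500, true) ? "yes" : "no");
	return 0;
}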
@@ -2461,7 +2520,7 @@ static void vxlan_cleanup(struct timer_list *t)
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
@@ -2512,7 +2571,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -2566,7 +2625,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
}
}
spin_unlock_bh(&vxlan->hash_lock);
@@ -2809,6 +2868,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_VXLAN_DF] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -2865,6 +2925,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_VXLAN_DF]) {
+ enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
+ if (df < 0 || df > VXLAN_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -2948,6 +3018,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
+ tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
tunnel_cfg.encap_destroy = NULL;
tunnel_cfg.gro_receive = vxlan_gro_receive;
tunnel_cfg.gro_complete = vxlan_gro_complete;
@@ -3292,13 +3363,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
/* notify default fdb entry */
if (f)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+ true);
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
errout:
if (f)
- vxlan_fdb_destroy(vxlan, f, false);
+ vxlan_fdb_destroy(vxlan, f, false, false);
return err;
}
@@ -3386,11 +3458,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->flags |= VXLAN_F_LEARN;
}
- if (data[IFLA_VXLAN_AGEING]) {
- if (changelink)
- return -EOPNOTSUPP;
+ if (data[IFLA_VXLAN_AGEING])
conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
- }
if (data[IFLA_VXLAN_PROXY]) {
if (changelink)
@@ -3509,6 +3578,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->mtu = nla_get_u32(tb[IFLA_MTU]);
}
+ if (data[IFLA_VXLAN_DF])
+ conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
return 0;
}
@@ -3532,6 +3604,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
+ unsigned long old_age_interval;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
struct vxlan_fdb *f = NULL;
@@ -3542,12 +3615,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ old_age_interval = vxlan->cfg.age_interval;
memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
if (err)
return err;
+ if (old_age_interval != vxlan->cfg.age_interval)
+ mod_timer(&vxlan->age_timer, jiffies);
+
/* handle default dst entry */
if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
spin_lock_bh(&vxlan->hash_lock);
@@ -3557,7 +3634,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
vxlan->cfg.dst_port,
old_dst.remote_vni,
old_dst.remote_vni,
- old_dst.remote_ifindex, 0);
+ old_dst.remote_ifindex,
+ true);
if (!vxlan_addr_any(&dst->remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
@@ -3572,7 +3650,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+ RTM_NEWNEIGH, true);
}
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -3601,6 +3680,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
@@ -3667,6 +3747,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
!!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+ nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
@@ -3749,7 +3830,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
+ &vxlan_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
@@ -3844,18 +3925,89 @@ out:
spin_unlock_bh(&vxlan->hash_lock);
}
+static int
+vxlan_fdb_external_learn_add(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
+ NUD_REACHABLE,
+ NLM_F_CREATE | NLM_F_REPLACE,
+ fdb_info->remote_port,
+ fdb_info->vni,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex,
+ NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
+ false);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
+static int
+vxlan_fdb_external_learn_del(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ int err = 0;
+
+ spin_lock_bh(&vxlan->hash_lock);
+
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ if (!f)
+ err = -ENOENT;
+ else if (f->flags & NTF_EXT_LEARNED)
+ err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
+ fdb_info->remote_ip,
+ fdb_info->remote_port,
+ fdb_info->vni,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex,
+ false);
+
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
static int vxlan_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info;
+ int err = 0;
switch (event) {
case SWITCHDEV_VXLAN_FDB_OFFLOADED:
vxlan_fdb_offloaded_set(dev, ptr);
break;
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
+ fdb_info = ptr;
+ err = vxlan_fdb_external_learn_add(dev, fdb_info);
+ if (err) {
+ err = notifier_from_errno(err);
+ break;
+ }
+ fdb_info->offloaded = true;
+ vxlan_fdb_offloaded_set(dev, fdb_info);
+ break;
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
+ fdb_info = ptr;
+ err = vxlan_fdb_external_learn_del(dev, fdb_info);
+ if (err) {
+ err = notifier_from_errno(err);
+ break;
+ }
+ fdb_info->offloaded = false;
+ vxlan_fdb_offloaded_set(dev, fdb_info);
+ break;
}
- return 0;
+ return err;
}
static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 4d6409605207..7a42336c8af8 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return -ENOMEM;
}
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
/* Start from the next BD that should be filled */
@@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
int tx_restart = 0;
@@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
if (tx_restart)
hdlc_tx_restart(priv);
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev)
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
napi_enable(&priv->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
}
@@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev)
free_irq(priv->ut_info->uf_info.irq, priv);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
priv->hdlc_busy = 0;
return 0;
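The fsl_ucc_hdlc hunks above wire the driver into byte queue limits (BQL): report queued bytes at transmit time with netdev_sent_queue(), report reclaimed packets and bytes from the TX-done path with netdev_completed_queue(), and zero the accounting with netdev_reset_queue() whenever the queue is restarted or torn down. The toy user-space model below shows only that bookkeeping pattern; it is not the driver's code and omits BQL's adaptive limit logic.

#include <stdio.h>

struct dql_toy {
	unsigned long queued_bytes;
	unsigned long completed_bytes;
};

/* netdev_sent_queue(): called once per transmitted skb. */
static void sent_queue(struct dql_toy *q, unsigned len)
{
	q->queued_bytes += len;
}

/* netdev_completed_queue(): called from TX-completion with totals. */
static void completed_queue(struct dql_toy *q, unsigned pkts, unsigned bytes)
{
	(void)pkts;	/* the packet count is reported to BQL as well */
	q->completed_bytes += bytes;
}

/* netdev_reset_queue(): called on open/close so stale state never lingers. */
static void reset_queue(struct dql_toy *q)
{
	q->queued_bytes = q->completed_bytes = 0;
}

int main(void)
{
	struct dql_toy q;

	reset_queue(&q);
	sent_queue(&q, 1500);
	sent_queue(&q, 60);
	completed_queue(&q, 1, 1500);
	printf("in flight: %lu bytes\n", q.queued_bytes - q.completed_bytes);
	return 0;
}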
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 3e37c8cf82c6..b2ad2122c8c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
return err;
}
+static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
+ struct mmc_command *mc, int sg_cnt, int req_sz,
+ int func_blk_sz, u32 *addr,
+ struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, int write)
+{
+ int ret;
+
+ md->sg_len = sg_cnt;
+ md->blocks = req_sz / func_blk_sz;
+ mc->arg |= (*addr & 0x1FFFF) << 9; /* address */
+ mc->arg |= md->blocks & 0x1FF; /* block count */
+ /* incrementing addr for function 1 */
+ if (func->num == 1)
+ *addr += req_sz;
+
+ mmc_set_data_timeout(md, func->card);
+ mmc_wait_for_req(func->card->host, mr);
+
+ ret = mc->error ? mc->error : md->error;
+ if (ret == -ENOMEDIUM) {
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+ } else if (ret != 0) {
+ brcmf_err("CMD53 sg block %s failed %d\n",
+ write ? "write" : "read", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
@@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_cnt, seg_sz;
+ unsigned int max_req_sz, src_offset, dst_offset;
unsigned char *pkt_data, *orig_data, *dst_data;
- struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
+ struct sk_buff *pkt_next = NULL, *src;
+ unsigned short max_seg_cnt;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
@@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
- seg_sz = target_list->qlen;
- pkt_offset = 0;
- pkt_next = target_list->next;
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
@@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
- while (seg_sz) {
- req_sz = 0;
- sg_cnt = 0;
- sgl = sdiodev->sgtable.sgl;
- /* prep sg table */
- while (pkt_next != (struct sk_buff *)target_list) {
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
+ skb_queue_walk(target_list, pkt_next) {
+ pkt_offset = 0;
+ while (pkt_offset < pkt_next->len) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
if (sg_data_sz > sdiodev->max_segment_size)
@@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
sg_data_sz = max_req_sz - req_sz;
sg_set_buf(sgl, pkt_data, sg_data_sz);
-
sg_cnt++;
+
sgl = sg_next(sgl);
req_sz += sg_data_sz;
pkt_offset += sg_data_sz;
- if (pkt_offset == pkt_next->len) {
- pkt_offset = 0;
- pkt_next = pkt_next->next;
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+ if (ret)
+ goto exit_queue_walk;
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
}
-
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
- break;
- }
- seg_sz -= sg_cnt;
-
- if (req_sz % func_blk_sz != 0) {
- brcmf_err("sg request length %u is not %u aligned\n",
- req_sz, func_blk_sz);
- ret = -ENOTBLK;
- goto exit;
- }
-
- mmc_dat.sg_len = sg_cnt;
- mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
- mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- /* incrementing addr for function 1 */
- if (func->num == 1)
- addr += req_sz;
-
- mmc_set_data_timeout(&mmc_dat, func->card);
- mmc_wait_for_req(func->card->host, &mmc_req);
-
- ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
- if (ret == -ENOMEDIUM) {
- brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
- break;
- } else if (ret != 0) {
- brcmf_err("CMD53 sg block %s failed %d\n",
- write ? "write" : "read", ret);
- ret = -EIO;
- break;
}
}
-
+ if (sg_cnt)
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+exit_queue_walk:
if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
- local_pkt_next = local_list.next;
- orig_offset = 0;
+ src = __skb_peek(&local_list);
+ src_offset = 0;
skb_queue_walk(pktlist, pkt_next) {
dst_offset = 0;
- do {
- req_sz = local_pkt_next->len - orig_offset;
- req_sz = min_t(uint, pkt_next->len - dst_offset,
- req_sz);
- orig_data = local_pkt_next->data + orig_offset;
+
+ /* This is safe because we must have enough SKB data
+ * in the local list to cover everything in pktlist.
+ */
+ while (1) {
+ req_sz = pkt_next->len - dst_offset;
+ if (req_sz > src->len - src_offset)
+ req_sz = src->len - src_offset;
+
+ orig_data = src->data + src_offset;
dst_data = pkt_next->data + dst_offset;
memcpy(dst_data, orig_data, req_sz);
- orig_offset += req_sz;
- dst_offset += req_sz;
- if (orig_offset == local_pkt_next->len) {
- orig_offset = 0;
- local_pkt_next = local_pkt_next->next;
+
+ src_offset += req_sz;
+ if (src_offset == src->len) {
+ src_offset = 0;
+ src = skb_peek_next(src, &local_list);
}
+ dst_offset += req_sz;
if (dst_offset == pkt_next->len)
break;
- } while (!skb_queue_empty(&local_list));
+ }
}
}
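The brcmfmac rework above replaces the hand-rolled pointer walk with skb_queue_walk() plus mmc_submit_one(), flushing the scatter-gather list whenever either the byte budget (max_req_sz) or the segment budget (max_seg_cnt) is hit, then flushing the remainder after the walk. The control flow, reduced to plain C over an array with made-up budgets, is:

#include <stdio.h>

#define MAX_REQ_SZ	8	/* byte budget per submission (illustrative) */
#define MAX_SEG_CNT	3	/* segment budget per submission (illustrative) */

static void submit(unsigned seg_cnt, unsigned req_sz)
{
	printf("submit: %u segments, %u bytes\n", seg_cnt, req_sz);
}

int main(void)
{
	unsigned lens[] = { 3, 3, 3, 5, 2 };	/* stand-ins for packet chunks */
	unsigned req_sz = 0, seg_cnt = 0;

	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		req_sz += lens[i];
		seg_cnt++;
		/* Flush mid-walk once either budget is exhausted ... */
		if (req_sz >= MAX_REQ_SZ || seg_cnt >= MAX_SEG_CNT) {
			submit(seg_cnt, req_sz);
			req_sz = 0;
			seg_cnt = 0;
		}
	}
	/* ... and flush whatever is left after the walk. */
	if (seg_cnt)
		submit(seg_cnt, req_sz);
	return 0;
}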
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602e6171..a8303afa15f1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
return;
}
- wmb(); /* barrier so backend seens requests */
-
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);