Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Makefile | 7
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 5
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 117
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 20
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 12
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 47
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 51
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 52
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 35
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 69
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 121
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 39
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 9
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 225
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 186
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 145
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 376
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 12
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 135
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 39
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 88
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 27
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 46
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 18
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 358
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 89
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 413
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 97
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 5
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 260
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 65
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 134
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 973
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 203
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 703
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 179
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 223
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c | 239
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 213
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c | 109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 385
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 373
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 193
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c | 253
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 459
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c | 374
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 222
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 302
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 215
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 316
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 384
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 151
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 432
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 322
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 250
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 445
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 254
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 13
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 85
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 108
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 87
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 58
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 22
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Makefile | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 40
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 262
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 35
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 193
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 2
-rw-r--r--  drivers/net/ethernet/packetengines/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 49
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 88
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 815
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 9
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 12
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 69
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 60
-rw-r--r--  drivers/net/ethernet/sun/ldmvsw.c | 3
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 16
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 3
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 39
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 5
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 13
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 112
253 files changed, 11941 insertions, 4259 deletions
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8fbfe9ce2fa5..22555e7fa752 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
-obj-$(CONFIG_NET_CADENCE) += cadence/
+obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
-obj-$(CONFIG_SFC) += sfc/
-obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
+obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8f71b79b4949..08945baee48a 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1933,7 +1933,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
- struct rx_desc *rxdesc, *retdesc;
+ struct rx_desc *retdesc;
u32 skbidx;
int bd_flags, desc_type, mapsize;
u16 csum;
@@ -1959,19 +1959,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE;
- rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE;
- rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE;
- rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d70594..c673ac2df65b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
u16 qid;
/* we suspect that this is good for in--kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb);
+ qid = fallback(dev, skb, NULL);
return qid;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index f2d8063a2cef..08c9fa6ca71f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
+#include "aq_vec.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
return aq_nic_update_interrupt_moderation_settings(aq_nic);
}
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+ return -EOPNOTSUPP;
+
+ if (netif_running(ndev))
+ return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+
+ return 0;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ pause->autoneg = 0;
+
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ pause->rx_pause = 1;
+ if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ pause->tx_pause = 1;
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+
+ if (pause->tx_pause)
+ aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+ else
+ aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+
+ err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+
+ return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+
+ ring->rx_pending = aq_nic_cfg->rxds;
+ ring->tx_pending = aq_nic_cfg->txds;
+
+ ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
+ ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ int err = 0;
+ bool ndev_running = false;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+ const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+ err = -EOPNOTSUPP;
+ goto err_exit;
+ }
+
+ if (netif_running(ndev)) {
+ ndev_running = true;
+ dev_close(ndev);
+ }
+
+ aq_nic_free_vectors(aq_nic);
+
+ aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+ aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
+ aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+ aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+ aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
+ aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+ for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+ aq_nic->aq_vecs++) {
+ aq_nic->aq_vec[aq_nic->aq_vecs] =
+ aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+ if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+ if (ndev_running)
+ err = dev_open(ndev);
+
+err_exit:
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .nway_reset = aq_ethtool_nway_reset,
+ .get_ringparam = aq_get_ringparam,
+ .set_ringparam = aq_set_ringparam,
+ .get_pauseparam = aq_ethtool_get_pauseparam,
+ .set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 2c6ebd91a9f2..5c00671f248d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
u64 link_speed_msk;
unsigned int hw_priv_flags;
u32 media_type;
- u32 rxds;
- u32 txds;
+ u32 rxds_max;
+ u32 txds_max;
+ u32 rxds_min;
+ u32 txds_min;
u32 txhwb_alignment;
u32 irq_mask;
u32 vecs;
@@ -98,6 +100,9 @@ struct aq_stats_s {
#define AQ_HW_MEDIA_TYPE_TP 1U
#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
struct aq_hw_s {
@@ -199,25 +204,30 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
- int (*hw_deinit)(struct aq_hw_s *self);
-
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
struct aq_fw_ops {
int (*init)(struct aq_hw_s *self);
+ int (*deinit)(struct aq_hw_s *self);
+
int (*reset)(struct aq_hw_s *self);
+ int (*renegotiate)(struct aq_hw_s *self);
+
int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
- int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+ int (*set_state)(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
int (*update_link_status)(struct aq_hw_s *self);
int (*update_stats)(struct aq_hw_s *self);
+
+ int (*set_flow_control)(struct aq_hw_s *self);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7a22d0257e04..26dc6782b475 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
- cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
- cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -768,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
- if (self->aq_nic_cfg.flow_control)
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
+ if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
else
@@ -886,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
+ (void)self->aq_fw_ops->deinit(self->aq_hw);
} else {
(void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 8cc6abadc03b..97addfa6f895 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -19,29 +19,31 @@
#include "hw_atl_a0_internal.h"
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_A0_RSS_MAX, \
- .tcs = HW_ATL_A0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_A0_RXD_SIZE, \
- .rxds = 248U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_A0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_A0_TX_RINGS, \
- .rx_rings = HW_ATL_A0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds_max = HW_ATL_A0_MAX_RXD, \
+ .rxds_min = HW_ATL_A0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds_max = HW_ATL_A0_MAX_TXD, \
+ .txds_min = HW_ATL_A0_MIN_RXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
.hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_A0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1d8855558d74..3c94cff57876 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,4 +88,12 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_A0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 956860a69797..4809bf4baa34 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -20,30 +20,32 @@
#include "hw_atl_llh_internal.h"
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
- .is_64_dma = true, \
- .msix_irqs = 4U, \
- .irq_mask = ~0U, \
- .vecs = HW_ATL_B0_RSS_MAX, \
- .tcs = HW_ATL_B0_TC_MAX, \
- .rxd_alignment = 1U, \
- .rxd_size = HW_ATL_B0_RXD_SIZE, \
- .rxds = 4U * 1024U, \
- .txd_alignment = 1U, \
- .txd_size = HW_ATL_B0_TXD_SIZE, \
- .txds = 8U * 1024U, \
- .txhwb_alignment = 4096U, \
- .tx_rings = HW_ATL_B0_TX_RINGS, \
- .rx_rings = HW_ATL_B0_RX_RINGS, \
- .hw_features = NETIF_F_HW_CSUM | \
- NETIF_F_RXCSUM | \
- NETIF_F_RXHASH | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_LRO, \
- .hw_priv_flags = IFF_UNICAST_FLT, \
- .flow_control = true, \
- .mtu = HW_ATL_B0_MTU_JUMBO, \
- .mac_regs_count = 88, \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds_max = HW_ATL_B0_MAX_RXD, \
+ .rxds_min = HW_ATL_B0_MIN_RXD, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds_max = HW_ATL_B0_MAX_TXD, \
+ .txds_min = HW_ATL_B0_MIN_TXD, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
@@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
- .hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_b0_hw_reset,
.hw_start = hw_atl_b0_hw_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 405d1455c222..28568f5fa74b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,6 +142,14 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
+#define HW_ATL_B0_MIN_RXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+ (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index e652d86b87d4..c965e65d07db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -30,10 +30,11 @@
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
-#define HW_ATL_MPI_STATE_MSK 0x00FFU
-#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
-#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
@@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ val = val & ~HW_ATL_MPI_SPEED_MSK;
+ val |= speed << HW_ATL_MPI_SPEED_SHIFT;
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
-void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state,
- u32 speed)
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
{
int err = 0;
u32 transaction_id = 0;
struct hw_aq_atl_utils_mbox_header mbox;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
if (state == MPI_RESET) {
hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
}
+ /* On interface DEINIT we disable DW (raise bit)
+ * Otherwise enable DW (clear bit)
+ */
+ if (state == MPI_DEINIT || state == MPI_POWER)
+ val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+ else
+ val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
- (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
-
-err_exit:;
-}
-
-static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state)
-{
- u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+ /* Set new state bits */
+ val = val & ~HW_ATL_MPI_STATE_MSK;
+ val |= state & HW_ATL_MPI_STATE_MSK;
- val = state | (val & HW_ATL_MPI_SPEED_MSK);
aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
- return 0;
+err_exit:
+ return err;
}
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
@@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
*p = chip_features;
}
-int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
- hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
return 0;
}
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state)
{
- hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
return 0;
}
@@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
+ .deinit = hw_atl_fw1x_deinit,
.reset = NULL,
.get_mac_permanent = hw_atl_utils_get_mac_permanent,
.set_link_speed = hw_atl_utils_mpi_set_speed,
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .set_flow_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index cd8f18f39c61..b875590efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi {
CAPS_HI_TRANSACTION_ID,
};
+enum hw_atl_fw2x_ctrl {
+ CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED2,
+ CTRL_RESERVED3,
+ CTRL_PAUSE,
+ CTRL_ASYMMETRIC_PAUSE,
+ CTRL_RESERVED4,
+ CTRL_RESERVED5,
+ CTRL_RESERVED6,
+ CTRL_1GBASET_FD_EEE,
+ CTRL_2P5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE,
+ CTRL_10GBASET_FD_EEE,
+ CTRL_THERMAL_SHUTDOWN,
+ CTRL_PHY_LOGS,
+ CTRL_EEE_AUTO_DISABLE,
+ CTRL_PFC,
+ CTRL_WAKE_ON_LINK,
+ CTRL_CABLE_DIAG,
+ CTRL_TEMPERATURE,
+ CTRL_DOWNSHIFT,
+ CTRL_PTP_AVB,
+ CTRL_RESERVED7,
+ CTRL_LINK_DROP,
+ CTRL_SLEEP_PROXY,
+ CTRL_WOL,
+ CTRL_MAC_STOP,
+ CTRL_EXT_LOOPBACK,
+ CTRL_INT_LOOPBACK,
+ CTRL_RESERVED8,
+ CTRL_WOL_TIMER,
+ CTRL_STATISTICS,
+ CTRL_FORCE_RECONNECT,
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 39cd3a27fe77..e37943760a58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -28,6 +28,10 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
+
static int aq_fw2x_init(struct aq_hw_s *self)
{
int err = 0;
@@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self)
return err;
}
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+ int err = aq_fw2x_set_link_speed(self, 0);
+
+ if (!err)
+ err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+ return err;
+}
+
static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
{
enum hw_atl_fw2x_rate rate = 0;
@@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
return 0;
}
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+ *mpi_state |= BIT(CAPS_HI_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+ if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+ *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+ else
+ *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
- /* No explicit state in 2x fw */
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ switch (state) {
+ case MPI_INIT:
+ mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+ break;
+ case MPI_DEINIT:
+ mpi_state |= BIT(CAPS_HI_LINK_DROP);
+ break;
+ case MPI_RESET:
+ case MPI_POWER:
+ /* No actions */
+ break;
+ }
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
return 0;
}
@@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
.reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
.get_mac_permanent = aq_fw2x_get_mac_permanent,
.set_link_speed = aq_fw2x_set_link_speed,
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .set_flow_control = aq_fw2x_set_flow_control,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index a445de6837a6..94efc6477bdc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -12,8 +12,8 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 2
-#define NIC_REVISION_DRIVER_VERSION 1
+#define NIC_BUILD_DRIVER_VERSION 3
+#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 5e5022fa1d04..6d3221134927 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx)
struct alx_hw *hw = &alx->hw;
unsigned long flags;
int old_speed;
- u8 old_duplex;
int err;
/* clear PHY internal interrupt status, otherwise the main
@@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx)
alx_clear_phy_intr(hw);
old_speed = hw->link_speed;
- old_duplex = hw->duplex;
err = alx_read_phy_link(hw);
if (err < 0)
goto reset;
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index 8ba7f8ff3434..392f564d8fd4 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -1,5 +1,6 @@
config NET_VENDOR_AURORA
bool "Aurora VLSI devices"
+ default y
help
If you have a network (Ethernet) device belonging to this class,
say Y.
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e94159507847..c8d1f8fa4713 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
again:
do {
- struct nb8800_rx_buf *rxb;
unsigned int len;
next = (last + 1) % RX_DESC_COUNT;
- rxb = &priv->rx_bufs[next];
rxd = &priv->rx_descs[next];
if (!rxd->report)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 4c3bfde6e8de..b7aa8ad96dfb 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,7 +61,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
- depends on OF && HAS_IOMEM
+ depends on HAS_IOMEM
select MII
select PHYLIB
select FIXED_PHY
@@ -181,7 +181,7 @@ config BGMAC_PLATFORM
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
- depends on OF
+ depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
select MII
select PHYLIB
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a1f60f89e059..631617d95769 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
return tx_ring->index;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6ea8e61f96d..4c94d9218bba 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
- bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
/* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
slot->dma_addr = 0;
ring->start++;
- freed = true;
}
if (!pkts_compl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index af7b5a4d8ba0..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0..0e508e5defce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 22243c480a05..98d4c5a3ff21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
*/
if (!vars->link_up)
break;
+ /* else: fall through */
case LED_MODE_ON:
if (((params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = SPEED_100;
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 57348f2b49a3..71362b7f6040 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
bp->num_queues,
1 + bp->num_cnic_queues);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
- /* falling through... */
+ /* fall through */
case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8baf9d3eb4b1..3f4d2c8da21a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
/* DEL command deletes all currently configured MACs */
case BNX2X_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall through */
/* RESTORE command will restore the entire multicast configuration */
case BNX2X_MCAST_CMD_RESTORE:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index dc77bfded865..62da46537734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1827,6 +1827,7 @@ get_vf:
DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
vf->abs_vfid, qidx);
bnx2x_vf_handle_rss_update_eqe(bp, vf);
+ /* fall through */
case EVENT_RING_OPCODE_VF_FLR:
/* Do nothing for now */
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4394c1162be4..c612d74451a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
- /* fall thru */
+ /* fall through */
}
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
@@ -3012,13 +3012,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
- if (bp->hwrm_dbg_resp_addr) {
- dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
- bp->hwrm_dbg_resp_addr,
- bp->hwrm_dbg_resp_dma_addr);
-
- bp->hwrm_dbg_resp_addr = NULL;
- }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3023,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
GFP_KERNEL);
if (!bp->hwrm_cmd_resp_addr)
return -ENOMEM;
- bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
- HWRM_DBG_REG_BUF_SIZE,
- &bp->hwrm_dbg_resp_dma_addr,
- GFP_KERNEL);
- if (!bp->hwrm_dbg_resp_addr)
- netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0;
}
@@ -7991,7 +7978,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
- bp, bp);
+ bp, bp, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 91575ef97c8c..934aa11c82eb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1287,9 +1287,6 @@ struct bnxt {
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
- void *hwrm_dbg_resp_addr;
- dma_addr_t hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE 128
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 402fa32f7a88..7bd96ab4f7c5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+ BNXT_NVM_SHARED_CFG, 1},
+};
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ int msg_len, union devlink_param_value *val)
+{
+ struct hwrm_nvm_variable_input *req = msg;
+ void *data_addr = NULL, *buf = NULL;
+ struct bnxt_dl_nvm_param nvm_param;
+ int bytesize, idx = 0, rc, i;
+ dma_addr_t data_dma_addr;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp))
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ if (nvm_params[i].id == param_id) {
+ nvm_param = nvm_params[i];
+ break;
+ }
+ }
+
+ if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ idx = bp->pf.port_id;
+ else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+ bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+
+ data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+ &data_dma_addr, GFP_KERNEL);
+ if (!data_addr)
+ return -ENOMEM;
+
+ req->data_addr = cpu_to_le64(data_dma_addr);
+ req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->option_num = cpu_to_le16(nvm_param.offset);
+ req->index_0 = cpu_to_le16(idx);
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+ if (req->req_type == HWRM_NVM_SET_VARIABLE)
+ memcpy(data_addr, buf, bytesize);
+
+ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+ if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE)
+ memcpy(buf, data_addr, bytesize);
+
+ dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ if (rc)
+ return -EIO;
+ return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_get_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct hwrm_nvm_set_variable_input req = {0};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+ return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+};
+
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
int rc;
- if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10803) {
- netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ if (bp->hwrm_spec_code < 0x10600) {
+ netdev_warn(bp->dev, "Firmware does not support NVM params");
return -ENOTSUPP;
}
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
}
bnxt_link_bp_to_dl(bp, dl);
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+ /* Add switchdev eswitch mode setting, if SRIOV supported */
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+ bp->hwrm_spec_code > 0x10803)
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- bnxt_link_bp_to_dl(bp, NULL);
- devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
- return rc;
+ goto err_dl_free;
+ }
+
+ rc = devlink_params_register(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
+ if (rc) {
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ rc);
+ goto err_dl_unreg;
}
return 0;
+
+err_dl_unreg:
+ devlink_unregister(dl);
+err_dl_free:
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+ return rc;
}
void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ devlink_params_unregister(dl, bnxt_dl_params,
+ ARRAY_SIZE(bnxt_dl_params));
devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index e92a35d8b642..2f68dc048390 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_ENABLE_SRIOV 401
+
+enum bnxt_nvm_dir_type {
+ BNXT_NVM_SHARED_CFG = 40,
+ BNXT_NVM_PORT_CFG,
+ BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+ u16 id;
+ u16 offset;
+ u16 dir_type;
+ u16 num_bits;
+};
+
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 0fe0ea8dce6c..c75d7fa6dab6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -6201,6 +6201,19 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
+struct hwrm_nvm_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ __le16 dimensions;
+ __le16 index_0;
+};
+
/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 491bd40a254d..139d96c5a023 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1568,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
- int rc = 0;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
- break;
-
+ return bnxt_tc_add_flow(bp, src_fid, cls_flower);
case TC_CLSFLOWER_DESTROY:
- rc = bnxt_tc_del_flow(bp, cls_flower);
- break;
-
+ return bnxt_tc_del_flow(bp, cls_flower);
case TC_CLSFLOWER_STATS:
- rc = bnxt_tc_get_flow_stats(bp, cls_flower);
- break;
+ return bnxt_tc_get_flow_stats(bp, cls_flower);
+ default:
+ return -EOPNOTSUPP;
}
- return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 05d405905906..e31f5d803c13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
bnxt_vf_rep_setup_tc_block_cb,
- vf_rep, vf_rep);
+ vf_rep, vf_rep, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
bnxt_vf_rep_setup_tc_block_cb, vf_rep);
@@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (bp->hwrm_spec_code < 0x10803) {
+ netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev,
- "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 1f0e872d0667..0584d07c8c33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
rc = bnxt_xdp_set(bp, xdp->prog);
break;
case XDP_QUERY_PROG:
- xdp->prog_attached = !!bp->xdp_prog;
xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
rc = 0;
break;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4fd829b5e65d..d83233ae4a15 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
- struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
@@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
- req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
@@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- kfree(cp->csk_tbl);
+ kvfree(cp->csk_tbl);
cp->csk_tbl = NULL;
cnic_free_id_tbl(&cp->csk_port_tbl);
}
@@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
- cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
- GFP_KERNEL);
+ cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+ GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
@@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func, ret;
+ int ret;
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->func = bp->pf_num;
- func = CNIC_FUNC(cp);
pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index aa1374d0af93..d8dad07f826a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -725,6 +725,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -785,6 +786,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
case TG3_APE_LOCK_GPIO:
if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
+ /* else: fall through */
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
if (!tp->pci_fn)
@@ -10719,28 +10721,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
switch (limit) {
case 16:
tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ /* fall through */
case 15:
tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ /* fall through */
case 14:
tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ /* fall through */
case 13:
tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ /* fall through */
case 12:
tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ /* fall through */
case 11:
tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ /* fall through */
case 10:
tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ /* fall through */
case 9:
tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ /* fall through */
case 8:
tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ /* fall through */
case 7:
tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ /* fall through */
case 6:
tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ /* fall through */
case 5:
tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ /* fall through */
case 4:
/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 427d65a1a126..b9984015ca8c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,7 +2,7 @@
# Atmel device configuration
#
-config NET_CADENCE
+config NET_VENDOR_CADENCE
bool "Cadence devices"
depends on HAS_IOMEM
default y
@@ -16,7 +16,7 @@ config NET_CADENCE
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_CADENCE
+if NET_VENDOR_CADENCE
config MACB
tristate "Cadence MACB/GEM support"
@@ -48,4 +48,4 @@ config MACB_PCI
To compile this driver as a module, choose M here: the module
will be called macb_pci.
-endif # NET_CADENCE
+endif # NET_VENDOR_CADENCE
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 92d88c5f76fb..5f03199a3acf 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -4,7 +4,6 @@
config NET_VENDOR_CAVIUM
bool "Cavium ethernet drivers"
- depends on PCI
default y
---help---
Select this option if you want to enable Cavium network support.
@@ -36,7 +35,7 @@ config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
@@ -46,7 +45,7 @@ config THUNDER_NIC_RGX
tristate "Thunder MAC interface driver (RGX)"
depends on 64BIT && PCI
select PHYLIB
- select MDIO_THUNDER
+ select MDIO_THUNDER if PCI
---help---
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
@@ -67,6 +66,7 @@ config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
depends on MAY_USE_DEVLINK
+ depends on PCI
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 929d485a3a2f..e088dedc1747 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 9338a0008378..91dce8b8ef7e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
reg_val =
octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
/* set DPTR */
reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
@@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
mbox_cmd.fn = NULL;
- mbox_cmd.fn_arg = 0;
+ mbox_cmd.fn_arg = NULL;
octeon_mbox_write(oct, &mbox_cmd);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7e8454d3b1ad..8ef87a76692b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -687,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
lt = (struct lio_time *)sc->virtdptr;
/* Get time of the day */
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
lt->sec = ts.tv_sec;
lt->nsec = ts.tv_nsec;
octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
@@ -2631,7 +2631,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
@@ -2909,7 +2909,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3068,7 +3068,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.param2 = linkstate;
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.cb_fn = 0;
+ nctrl.cb_fn = NULL;
nctrl.wait_time = LIO_CMD_WAIT_TM;
octnet_send_nic_ctrl_pkt(oct, &nctrl);
@@ -3302,7 +3302,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
{
struct lio *lio = NULL;
struct net_device *netdev;
- u8 mac[6], i, j, *fw_ver;
+ u8 mac[6], i, j, *fw_ver, *micro_ver;
+ unsigned long micro;
+ u32 cur_ver;
struct octeon_soft_command *sc;
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
@@ -3432,6 +3434,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
fw_ver);
}
+ /* extract micro version field; point past '<maj>.<min>.' */
+ micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+ if (kstrtoul(micro_ver, 10, &micro) != 0)
+ micro = 0;
+ octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+ octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+ octeon_dev->fw_info.ver.rev = micro;
+
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -3572,9 +3582,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
u8 vfmac[ETH_ALEN];
- random_ether_addr(&vfmac[0]);
- if (__liquidio_set_vf_mac(netdev, j,
- &vfmac[0], false)) {
+ eth_random_addr(vfmac);
+ if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
dev_err(&octeon_dev->pci_dev->dev,
"Error setting VF%d MAC address\n",
j);
@@ -3675,7 +3684,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTEON_CN2350_25GB_SUBSYS_ID ||
octeon_dev->subsystem_id ==
OCTEON_CN2360_25GB_SUBSYS_ID) {
- liquidio_get_speed(lio);
+ cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+ octeon_dev->fw_info.ver.min,
+ octeon_dev->fw_info.ver.rev);
+
+ /* speed control unsupported in f/w older than 1.7.2 */
+ if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "speed setting not supported by f/w.");
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ } else {
+ liquidio_get_speed(lio);
+ }
if (octeon_dev->speed_setting == 0) {
octeon_dev->speed_setting = 25;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7fa0212873ac..b77835724dc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
}
return ret;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 7f97ae48efed..0cc2338d8d2a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
*
* Octeon always uses UTC time. so timezone information is not sent.
*/
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
" time_sec=%lld time_nsec=%ld",
(s64)ts.tv_sec, ts.tv_nsec);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 94a4ed88d618..d99ca6ba23a4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -288,8 +288,17 @@ struct oct_fw_info {
*/
u32 app_mode;
char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
};
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
/* wrappers around work structs */
struct cavium_wk {
struct delayed_work work;
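To illustrate why the firmware version fields are packed this way: OCT_FW_VER() collapses (maj, min, rev) into one integer so versions compare with an ordinary less-than, exactly as the lio_main.c check against OCT_FW_VER(1, 7, 2) relies on. A stand-alone user-space sketch with made-up version numbers:

#include <stdio.h>

#define OCT_FW_VER(maj, min, rev) \
	(((unsigned int)(maj) << 16) | ((unsigned int)(min) << 8) | ((unsigned int)(rev)))

int main(void)
{
	unsigned int cur_ver = OCT_FW_VER(1, 7, 1);	/* illustrative f/w version */

	if (cur_ver < OCT_FW_VER(1, 7, 2))
		printf("speed setting not supported by f/w\n");
	else
		printf("speed setting supported\n");
	return 0;
}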
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 1f2e75da28f8..d5d9e47daa4b 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
- dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
- iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+ iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 135766c4296b..768f584f8392 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return nicvf_xdp_setup(nic, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nic->xdp_prog;
xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
return 0;
default:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 3c5057868ab3..aaf7985aef4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -281,12 +281,18 @@ struct cudbg_tid_data {
#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
+#define CUDBG_NUM_ULPTX_ASIC 6
+#define CUDBG_NUM_ULPTX_ASIC_READ 128
+
+#define CUDBG_ULPTX_LA_REV 1
struct cudbg_ulptx_la {
u32 rdptr[CUDBG_NUM_ULPTX];
u32 wrptr[CUDBG_NUM_ULPTX];
u32 rddata[CUDBG_NUM_ULPTX];
u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+ u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
+ u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
};
#define CUDBG_CHAC_PBT_ADDR 0x2800
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 0afcfe99bff3..b1eb843035ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -2586,15 +2586,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ulptx_la *ulptx_la_buff;
+ struct cudbg_ver_hdr *ver_hdr;
u32 i, j;
int rc;
- rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
- ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+ ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_ULPTX_LA_REV;
+ ver_hdr->size = sizeof(struct cudbg_ulptx_la);
+
+ ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
+ sizeof(*ver_hdr));
for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2619,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
+
+ for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
+ t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
+ ulptx_la_buff->rdptr_asic[i] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
+ ulptx_la_buff->rddata_asic[i][0] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
+ ulptx_la_buff->rddata_asic[i][1] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
+ ulptx_la_buff->rddata_asic[i][2] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
+ ulptx_la_buff->rddata_asic[i][3] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
+ ulptx_la_buff->rddata_asic[i][4] =
+ t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
+ ulptx_la_buff->rddata_asic[i][5] =
+ t4_read_reg(padap, PM_RX_BASE_ADDR);
+ }
+
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
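Because the ULP TX LA entity now carries a cudbg_ver_hdr in front of the payload, a dump consumer has to validate and skip that header before reading the LA data. A hedged sketch of that step, using only the signature/revision/size fields written above:

/* Sketch only: locate the ULP TX LA payload behind its version header.
 * Returns NULL for an unversioned or unexpectedly short buffer, i.e. a
 * dump taken before this revision (no ASIC read data present).
 */
static struct cudbg_ulptx_la *cudbg_ulptx_la_payload(void *entity_buf)
{
	struct cudbg_ver_hdr *hdr = entity_buf;

	if (hdr->signature != CUDBG_ENTITY_SIGNATURE ||
	    hdr->revision < CUDBG_ULPTX_LA_REV ||
	    hdr->size < sizeof(struct cudbg_ulptx_la))
		return NULL;

	return (struct cudbg_ulptx_la *)(hdr + 1);
}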
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0dbe2d9e22d6..3da9299cd786 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,6 +46,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
u8 na[MACADDR_LEN + 1];
};
+/* Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
+ struct pf_resources pfres;
struct pci_params pci;
struct devlog_params devlog;
enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
+enum {
+ PRIV_FLAG_PORT_TX_VM_BIT,
+};
+
+#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
+
+#define PRIV_FLAGS_ADAP 0
+#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
+
struct adapter;
struct sge_rspq;
@@ -557,6 +583,7 @@ struct port_info {
struct hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
+ u32 eth_flags;
};
struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
unsigned int flags;
unsigned int adap_idx;
enum chip_type chip;
+ u32 eth_flags;
int msg_enable;
__be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
struct chcr_stats_debug chcr_stats;
/* TC flower offload */
+ bool tc_flower_initialized;
struct rhashtable flower_tbl;
struct rhashtable_params flower_ht_params;
struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 8d751efcb90e..55b46592af28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -273,7 +273,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
break;
case CUDBG_ULPTX_LA:
- len = sizeof(struct cudbg_ulptx_la);
+ len = sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
n = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c301aaf79d64..631b78baedbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2414,6 +2414,44 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
.release = seq_release_private
};
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", pfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
* @adap: the adapter
@@ -2820,6 +2858,7 @@ static int meminfo_show(struct seq_file *seq, void *v)
{
static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
"MC0:", "MC1:", "HMA:"};
+ unsigned int free_rx_cnt, free_tx_cnt;
struct adapter *adap = seq->private;
struct cudbg_meminfo meminfo;
int i, rc;
@@ -2851,13 +2890,20 @@ static int meminfo_show(struct seq_file *seq, void *v)
mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
meminfo.up_extmem2_hi);
- seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
- meminfo.rx_pages_data[2]);
-
- seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
- meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+ for (i = 0, free_rx_cnt = 0; i < 2; i++)
+ free_rx_cnt += FREERXPAGECOUNT_G
+ (t4_read_reg(adap, TP_FLM_FREE_RX_CNT_A));
+ seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
+ meminfo.rx_pages_data[0], free_rx_cnt,
+ meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]);
+
+ for (i = 0, free_tx_cnt = 0; i < 4; i++)
+ free_tx_cnt += FREETXPAGECOUNT_G
+ (t4_read_reg(adap, TP_FLM_FREE_TX_CNT_A));
+ seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
+ meminfo.tx_pages_data[0], free_tx_cnt,
+ meminfo.tx_pages_data[1], meminfo.tx_pages_data[2],
+ meminfo.tx_pages_data[3]);
seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
@@ -2924,6 +2970,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
.llseek = seq_lseek,
.release = single_release,
};
+
+#define PRINT_ADAP_STATS(string, value) \
+ seq_printf(seq, "%-25s %-20llu\n", (string), \
+ (unsigned long long)(value))
+
+#define PRINT_CH_STATS(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats.value[i]); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+#define PRINT_CH_STATS2(string, value) \
+do { \
+ seq_printf(seq, "%-25s ", (string)); \
+ for (i = 0; i < adap->params.arch.nchan; i++) \
+ seq_printf(seq, "%-20llu ", \
+ (unsigned long long)stats[i].value); \
+ seq_printf(seq, "\n"); \
+} while (0)
+
+static void show_tcp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_tcp_stats v4, v6;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
+ PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
+ PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
+}
+
+static void show_ddp_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_usm_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_get_usm_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
+ PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
+ PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
+}
+
+static void show_rdma_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_rdma_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_rdma_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
+ PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
+}
+
+static void show_tp_err_adapter_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
+ PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
+}
+
+static void show_cpl_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_cpl_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_cpl_requests:", req);
+ PRINT_CH_STATS("tp_cpl_responses:", rsp);
+}
+
+static void show_tp_err_channel_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_err_stats stats;
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_err_stats(adap, &stats, false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
+ PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
+ PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
+ PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
+ PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
+ PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
+ PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
+ PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
+}
+
+static void show_fcoe_stats(struct seq_file *seq)
+{
+ struct adapter *adap = seq->private;
+ struct tp_fcoe_stats stats[NCHAN];
+ u8 i;
+
+ spin_lock(&adap->stats_lock);
+ for (i = 0; i < adap->params.arch.nchan; i++)
+ t4_get_fcoe_stats(adap, i, &stats[i], false);
+ spin_unlock(&adap->stats_lock);
+
+ PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
+ PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
+ PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
+}
+
+#undef PRINT_CH_STATS2
+#undef PRINT_CH_STATS
+#undef PRINT_ADAP_STATS
+
+static int tp_stats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+
+ seq_puts(seq, "\n--------Adapter Stats--------\n");
+ show_tcp_stats(seq);
+ show_ddp_stats(seq);
+ show_rdma_stats(seq);
+ show_tp_err_adapter_stats(seq);
+
+ seq_puts(seq, "\n-------- Channel Stats --------\n");
+ if (adap->params.arch.nchan == NCHAN)
+ seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
+ " ", "channel 0", "channel 1",
+ "channel 2", "channel 3");
+ else
+ seq_printf(seq, "%-25s %-20s %-20s\n",
+ " ", "channel 0", "channel 1");
+ show_cpl_stats(seq);
+ show_tp_err_channel_stats(seq);
+ show_fcoe_stats(seq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3182,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+ { "resources", &resources_debugfs_fops, 0400, 0 },
{ "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
{ "ibq_tp0", &cim_ibq_fops, 0400, 0 },
{ "ibq_tp1", &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3209,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
{ "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index f7eef93ffc87..d07230c892a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_drop ",
"db_full ",
"db_empty ",
- "tcp_ipv4_out_rsts ",
- "tcp_ipv4_in_segs ",
- "tcp_ipv4_out_segs ",
- "tcp_ipv4_retrans_segs ",
- "tcp_ipv6_out_rsts ",
- "tcp_ipv6_in_segs ",
- "tcp_ipv6_out_segs ",
- "tcp_ipv6_retrans_segs ",
- "usm_ddp_frames ",
- "usm_ddp_octets ",
- "usm_ddp_drops ",
- "rdma_no_rqe_mod_defer ",
- "rdma_no_rqe_pkt_defer ",
- "tp_err_ofld_no_neigh ",
- "tp_err_ofld_cong_defer ",
"write_coal_success ",
"write_coal_fail ",
};
-static char channel_stats_strings[][ETH_GSTRING_LEN] = {
- "--------Channel--------- ",
- "tp_cpl_requests ",
- "tp_cpl_responses ",
- "tp_mac_in_errs ",
- "tp_hdr_in_errs ",
- "tp_tcp_in_errs ",
- "tp_tcp6_in_errs ",
- "tp_tnl_cong_drops ",
- "tp_tnl_tx_drops ",
- "tp_ofld_vlan_drops ",
- "tp_ofld_chan_drops ",
- "fcoe_octets_ddp ",
- "fcoe_frames_ddp ",
- "fcoe_frames_drop ",
-};
-
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"bg3_frames_trunc ",
};
+static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
+};
+
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(stats_strings) +
ARRAY_SIZE(adapter_stats_strings) +
- ARRAY_SIZE(channel_stats_strings) +
ARRAY_SIZE(loopback_stats_strings);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(cxgb4_priv_flags_strings);
default:
return -EOPNOTSUPP;
}
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
FW_HDR_FW_VER_MINOR_G(exprom_vers),
FW_HDR_FW_VER_MICRO_G(exprom_vers),
FW_HDR_FW_VER_BUILD_G(exprom_vers));
+ info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
memcpy(data, adapter_stats_strings,
sizeof(adapter_stats_strings));
data += sizeof(adapter_stats_strings);
- memcpy(data, channel_stats_strings,
- sizeof(channel_stats_strings));
- data += sizeof(channel_stats_strings);
memcpy(data, loopback_stats_strings,
sizeof(loopback_stats_strings));
+ } else if (stringset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, cxgb4_priv_flags_strings,
+ sizeof(cxgb4_priv_flags_strings));
}
}
@@ -270,41 +244,10 @@ struct adapter_stats {
u64 db_drop;
u64 db_full;
u64 db_empty;
- u64 tcp_v4_out_rsts;
- u64 tcp_v4_in_segs;
- u64 tcp_v4_out_segs;
- u64 tcp_v4_retrans_segs;
- u64 tcp_v6_out_rsts;
- u64 tcp_v6_in_segs;
- u64 tcp_v6_out_segs;
- u64 tcp_v6_retrans_segs;
- u64 frames;
- u64 octets;
- u64 drops;
- u64 rqe_dfr_mod;
- u64 rqe_dfr_pkt;
- u64 ofld_no_neigh;
- u64 ofld_cong_defer;
u64 wc_success;
u64 wc_fail;
};
-struct channel_stats {
- u64 cpl_req;
- u64 cpl_rsp;
- u64 mac_in_errs;
- u64 hdr_in_errs;
- u64 tcp_in_errs;
- u64 tcp6_in_errs;
- u64 tnl_cong_drops;
- u64 tnl_tx_drops;
- u64 ofld_vlan_drops;
- u64 ofld_chan_drops;
- u64 octets_ddp;
- u64 frames_ddp;
- u64 frames_drop;
-};
-
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
- struct tp_tcp_stats v4, v6;
- struct tp_rdma_stats rdma_stats;
- struct tp_err_stats err_stats;
- struct tp_usm_stats usm_stats;
u64 val1, val2;
memset(s, 0, sizeof(*s));
- spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, &v4, &v6, false);
- t4_tp_get_rdma_stats(adap, &rdma_stats, false);
- t4_get_usm_stats(adap, &usm_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- spin_unlock(&adap->stats_lock);
-
s->db_drop = adap->db_stats.db_drop;
s->db_full = adap->db_stats.db_full;
s->db_empty = adap->db_stats.db_empty;
- s->tcp_v4_out_rsts = v4.tcp_out_rsts;
- s->tcp_v4_in_segs = v4.tcp_in_segs;
- s->tcp_v4_out_segs = v4.tcp_out_segs;
- s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
- s->tcp_v6_out_rsts = v6.tcp_out_rsts;
- s->tcp_v6_in_segs = v6.tcp_in_segs;
- s->tcp_v6_out_segs = v6.tcp_out_segs;
- s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
-
- if (is_offload(adap)) {
- s->frames = usm_stats.frames;
- s->octets = usm_stats.octets;
- s->drops = usm_stats.drops;
- s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
- s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
- }
-
- s->ofld_no_neigh = err_stats.ofld_no_neigh;
- s->ofld_cong_defer = err_stats.ofld_cong_defer;
-
if (!is_t4(adap->params.chip)) {
int v;
@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
}
}
-static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
- u8 i)
-{
- struct tp_cpl_stats cpl_stats;
- struct tp_err_stats err_stats;
- struct tp_fcoe_stats fcoe_stats;
-
- memset(s, 0, sizeof(*s));
-
- spin_lock(&adap->stats_lock);
- t4_tp_get_cpl_stats(adap, &cpl_stats, false);
- t4_tp_get_err_stats(adap, &err_stats, false);
- t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
- spin_unlock(&adap->stats_lock);
-
- s->cpl_req = cpl_stats.req[i];
- s->cpl_rsp = cpl_stats.rsp[i];
- s->mac_in_errs = err_stats.mac_in_errs[i];
- s->hdr_in_errs = err_stats.hdr_in_errs[i];
- s->tcp_in_errs = err_stats.tcp_in_errs[i];
- s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
- s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
- s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
- s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
- s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
- s->octets_ddp = fcoe_stats.octets_ddp;
- s->frames_ddp = fcoe_stats.frames_ddp;
- s->frames_drop = fcoe_stats.frames_drop;
-}
-
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
@@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct adapter_stats) / sizeof(u64);
*data++ = (u64)pi->port_id;
- collect_channel_stats(adapter, (struct channel_stats *)data,
- pi->port_id);
- data += sizeof(struct channel_stats) / sizeof(u64);
-
- *data++ = (u64)pi->port_id;
memset(&s, 0, sizeof(s));
t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
- if (netif_carrier_ok(dev)) {
- base->speed = pi->link_cfg.speed;
- base->duplex = DUPLEX_FULL;
- } else {
- base->speed = SPEED_UNKNOWN;
- base->duplex = DUPLEX_UNKNOWN;
- }
+ base->speed = (netif_carrier_ok(dev)
+ ? pi->link_cfg.speed
+ : SPEED_UNKNOWN);
+ base->duplex = DUPLEX_FULL;
if (pi->link_cfg.fc & PAUSE_RX) {
if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
offset, len, &data[eprom->len - len]);
}
+static u32 cxgb4_get_priv_flags(struct net_device *netdev)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ return (adapter->eth_flags | pi->eth_flags);
+}
+
+/**
+ * set_flags - set/unset specified flags if passed in new_flags
+ * @cur_flags: pointer to current flags
+ * @new_flags: new incoming flags
+ * @flags: set of flags to set/unset
+ */
+static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
+{
+ *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
+}
+
+static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+
+ set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
+ set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
+
+ return 0;
+}
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_dump_data = get_dump_data,
.get_module_info = cxgb4_get_module_info,
.get_module_eeprom = cxgb4_get_module_eeprom,
+ .get_priv_flags = cxgb4_get_priv_flags,
+ .set_priv_flags = cxgb4_set_priv_flags,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
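The set_flags() helper above only lets bits named in the mask change, taking their new value from new_flags. A small stand-alone illustration of that masking behaviour (the PRIV_FLAG_PORT_TX_VM value mirrors the cxgb4.h definition; everything else is made up for the demo):

#include <assert.h>

#define PRIV_FLAG_PORT_TX_VM	0x1U

static void set_flags(unsigned int *cur_flags, unsigned int new_flags,
		      unsigned int flags)
{
	*cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
}

int main(void)
{
	unsigned int port_flags = 0;

	set_flags(&port_flags, PRIV_FLAG_PORT_TX_VM, PRIV_FLAG_PORT_TX_VM);
	assert(port_flags == PRIV_FLAG_PORT_TX_VM);	/* masked bit set */
	set_flags(&port_flags, 0, PRIV_FLAG_PORT_TX_VM);
	assert(port_flags == 0);			/* masked bit cleared */
	return 0;
}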
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index bc03c175a3cd..40cf8dc9f163 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -924,12 +924,14 @@ static int setup_sge_queues(struct adapter *adap)
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
freeout:
+ dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
t4_free_sge_resources(adap);
return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
int txq;
@@ -971,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb) % dev->real_num_tx_queues;
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3018,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
- pi, dev);
+ pi, dev, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
return 0;
@@ -3217,7 +3219,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
- .ndo_start_xmit = t4_eth_xmit,
+ .ndo_start_xmit = t4_start_xmit,
.ndo_select_queue = cxgb_select_queue,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
@@ -3536,6 +3538,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
u32 v;
int ret;
+ /* Now that we've successfully configured and initialized the adapter,
+ * we can ask the Firmware what resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ return ret;
+ }
+
/* get device capabilities */
memset(c, 0, sizeof(*c));
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4170,32 +4182,6 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
- /*
- * Grab VPD parameters. This should be done after we establish a
- * connection to the firmware since some of the VPD parameters
- * (notably the Core Clock frequency) are retrieved via requests to
- * the firmware. On the other hand, we need these fairly early on
- * so we do this right after getting ahold of the firmware.
- */
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
-
- /*
- * Find out what ports are available to us. Note that we need to do
- * this before calling adap_init0_no_config() since it needs nports
- * and portvec ...
- */
- v =
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
- if (ret < 0)
- goto bye;
-
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
-
/* If the firmware is initialized already, emit a simple note to that
* effect. Otherwise, it's time to try initializing the adapter.
*/
@@ -4246,6 +4232,45 @@ static int adap_init0(struct adapter *adap)
}
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning information\n");
+ goto bye;
+ }
+
+ /* Grab VPD parameters. This should be done after we establish a
+ * connection to the firmware since some of the VPD parameters
+ * (notably the Core Clock frequency) are retrieved via requests to
+ * the firmware. On the other hand, we need these fairly early on
+ * so we do this right after getting ahold of the firmware.
+ *
+ * We need to do this after initializing the adapter because someone
+ * could have FLASHed a new VPD which won't be read by the firmware
+ * until we do the RESET ...
+ */
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+
+ /* Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_no_config() since it needs nports
+ * and portvec ...
+ */
+ v =
+ FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0)
+ goto bye;
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
/* Give the SGE code a chance to pull in anything that it needs ...
* Note that this must be called after we retrieve our VPD parameters
* in order to know how to convert core ticks to seconds, etc.
@@ -4797,10 +4822,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
* of ports we found and the number of available CPUs. Most settings can be
* modified by the admin prior to actual use.
*/
-static void cfg_queues(struct adapter *adap)
+static int cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int i = 0, n10g = 0, qidx = 0;
+ int i, n10g = 0, qidx = 0;
+ int niqflint, neq, avail_eth_qsets;
+ int max_eth_qsets = 32;
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
@@ -4812,16 +4839,46 @@ static void cfg_queues(struct adapter *adap)
adap->params.crypto = 0;
}
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ /* Calculate the number of Ethernet Queue Sets available based on
+ * resources provisioned for us. We always have an Asynchronous
+ * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
+ * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
+ * Ingress Queue. Meanwhile, we need two Egress Queues for each
+ * Queue Set: one for the Free List and one for the Ethernet TX Queue.
+ *
+ * Note that we should also take into account all of the various
+ * Offload Queues. But, in any situation where we're operating in
+ * a Resource Constrained Provisioning environment, doing any Offload
+ * at all is problematic ...
+ */
+ niqflint = adap->params.pfres.niqflint - 1;
+ if (!(adap->flags & USING_MSIX))
+ niqflint--;
+ neq = adap->params.pfres.neq / 2;
+ avail_eth_qsets = min(niqflint, neq);
+
+ if (avail_eth_qsets > max_eth_qsets)
+ avail_eth_qsets = max_eth_qsets;
+
+ if (avail_eth_qsets < adap->params.nports) {
+ dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
+ avail_eth_qsets, adap->params.nports);
+ return -ENOMEM;
+ }
+
+ /* Count the number of 10Gb/s or better ports */
+ for_each_port(adap, i)
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
* own TX Queue in order to prevent Head-Of-Line Blocking.
*/
- if (adap->params.nports * 8 > MAX_ETH_QSETS) {
- dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
- MAX_ETH_QSETS, adap->params.nports * 8);
- BUG_ON(1);
+ if (adap->params.nports * 8 > avail_eth_qsets) {
+ dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
+ avail_eth_qsets, adap->params.nports * 8);
+ return -ENOMEM;
}
for_each_port(adap, i) {
@@ -4837,7 +4894,7 @@ static void cfg_queues(struct adapter *adap)
* per 10G port.
*/
if (n10g)
- q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
@@ -4888,6 +4945,8 @@ static void cfg_queues(struct adapter *adap)
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
+
+ return 0;
}
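To make the Queue Set budget computed in cfg_queues() above concrete, here is a worked sketch of the same arithmetic with made-up provisioning numbers; on real hardware niqflint and neq come from t4_get_pfres():

#include <stdio.h>

int main(void)
{
	/* Illustrative PF provisioning; real values come from t4_get_pfres() */
	unsigned int pfres_niqflint = 66;	/* ingress queues w/ FL & intr */
	unsigned int pfres_neq = 130;		/* egress queues */
	unsigned int nports = 2;
	int using_msix = 0;			/* MSI/INTx needs a fwd-intr IQ */
	const unsigned int max_eth_qsets = 32;
	unsigned int niqflint, neq, avail_eth_qsets;

	niqflint = pfres_niqflint - 1;		/* async FW event queue */
	if (!using_msix)
		niqflint--;			/* forwarded interrupt queue */
	neq = pfres_neq / 2;			/* FL + ETH TX EQ per qset */

	avail_eth_qsets = niqflint < neq ? niqflint : neq;
	if (avail_eth_qsets > max_eth_qsets)
		avail_eth_qsets = max_eth_qsets;

	if (avail_eth_qsets < nports)
		printf("not enough queue sets for %u ports\n", nports);
	else
		printf("avail_eth_qsets=%u\n", avail_eth_qsets);
	return 0;
}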
/*
@@ -5628,10 +5687,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!(adapter->flags & FW_OK))
+ goto fw_attach_fail;
+
/* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
- cfg_queues(adapter);
+ err = cfg_queues(adapter);
+ if (err)
+ goto out_free_dev;
adapter->smt = t4_init_smt();
if (!adapter->smt) {
@@ -5703,7 +5767,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
- if (chip <= CHELSIO_T5) {
+ if (chip_ver <= CHELSIO_T5) {
hash_reg = LE_DB_TID_HASHBASE_A;
hash_base = t4_read_reg(adapter, hash_reg);
adapter->tids.hash_base = hash_base / 4;
@@ -5738,6 +5802,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_dev;
}
+fw_attach_fail:
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 3ddd2c4acf68..623f73dd7738 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
{
int ret;
+ if (adap->tc_flower_initialized)
+ return -EEXIST;
+
adap->flower_ht_params = cxgb4_tc_flower_ht_params;
ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+ adap->tc_flower_initialized = true;
return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
+ if (!adap->tc_flower_initialized)
+ return;
+
if (adap->flower_stats_timer.function)
del_timer_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
+ adap->tc_flower_initialized = false;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 9148abb7994c..7fc656680299 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
struct port_info *pi = netdev2pinfo(adap->port[j]);
s = pi->sched_tbl;
+ if (!s)
+ continue;
+
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 395e2a0e8d7f..6807bc3a44fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
}
/**
- * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
* @dev: the egress net device
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid, ctrl0, op;
u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+/* Constants ... */
+enum {
+ /* Egress Queue sizes, producer and consumer indices are all in units
+ * of Egress Context Units bytes. Note that as far as the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+};
+
+/**
+ * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
+{
+ /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+ * too much if we ever want to enhance the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (t4vf_is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct skb_shared_info *ssi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ int qidx, credits, max_pkt_len;
+ struct cpl_tx_pkt_core *cpl;
+ const struct port_info *pi;
+ unsigned int flits, ndesc;
+ struct sge_eth_txq *txq;
+ struct adapter *adapter;
+ u64 cntrl, *end;
+ u32 wr_mid;
+ const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci);
+
+ /* The chip minimum packet length is 10 octets but the firmware
+ * command that we are using requires that we copy the Ethernet header
+ * (including the VLAN tag) into the header so we reject anything
+ * smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
+ /* Figure out which TX Queue we're going to use. */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ WARN_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /* Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+
+ /* Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = t4vf_calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /* Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!t4vf_is_eth_imm(skb) &&
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /* We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ eth_txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ /* Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be32(0);
+ wr->r3[1] = cpu_to_be32(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /* If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(sizeof(*lso) +
+ sizeof(*cpl)));
+ /* Fill in the LSO CPL message. */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ if (is_t4(adapter->params.chip))
+ lso->len = cpu_to_be32(skb->len);
+ else
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = (t4vf_is_eth_imm(skb)
+ ? skb->len + sizeof(*cpl)
+ : sizeof(*cpl));
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN_V(len));
+
+ /* Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
+ txq->tx_cso++;
+ } else {
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ }
+ }
+
+ /* If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ /* Fill in the TX Packet CPL message header. */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ /* Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (t4vf_is_eth_imm(skb)) {
+ /* In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_consume_skb_any(skb);
+ } else {
+ /* Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+ * This is good for performance but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+ * after the packet is queued but before it's DMAd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /* If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /* Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+
+ cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /* An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
+ return cxgb4_vf_eth_xmit(skb, dev);
+
+ return cxgb4_eth_xmit(skb, dev);
+}
+
/**
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
+ FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
+ : FW_IQ_IQTYPE_OFLD));
if (fl) {
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3720c3e11ebb..2d9943f90a75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+ /* Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adapter->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /* Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+ pfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = FW_PFVF_CMD_NEQ_G(word);
+ pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ pfres->tc = FW_PFVF_CMD_TC_G(word);
+ pfres->nvi = FW_PFVF_CMD_NVI_G(word);
+ pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+ pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+ pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+ return 0;
+}
+
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index c7f8d0441278..e3adf435913e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 6b55aa2eb2a5..da885881c02f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1502,6 +1502,20 @@
#define TP_MIB_DATA_A 0x7e54
#define TP_INT_CAUSE_A 0x7e74
+#define TP_FLM_FREE_RX_CNT_A 0x7e84
+
+#define FREERXPAGECOUNT_S 0
+#define FREERXPAGECOUNT_M 0x1fffffU
+#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S)
+#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)
+
+#define TP_FLM_FREE_TX_CNT_A 0x7e88
+
+#define FREETXPAGECOUNT_S 0
+#define FREETXPAGECOUNT_M 0x1fffffU
+#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S)
+#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M)
+
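The _S/_M/_V/_G quartet added above follows the usual cxgb4 register-field pattern: shift, mask, insert, extract. A small standalone sketch of how the free-page counter would typically be read back; the register value here is made up:

#include <stdio.h>

#define FREERXPAGECOUNT_S 0
#define FREERXPAGECOUNT_M 0x1fffffU
#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S)
#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M)

int main(void)
{
	unsigned int reg = 0xdeadbeef;	/* pretend raw TP_FLM_FREE_RX_CNT_A readback */

	/* _G shifts and masks the 21-bit free page count out of the raw word */
	printf("free RX pages: %u\n", FREERXPAGECOUNT_G(reg));
	/* _V would shift a count back into register position for a write */
	printf("encoded: 0x%x\n", FREERXPAGECOUNT_V(42u));
	return 0;
}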
#define FLMTXFLSTEMPTY_S 30
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
@@ -1683,6 +1697,16 @@
#define ULP_TX_LA_RDPTR_0_A 0x8ec0
#define ULP_TX_LA_RDDATA_0_A 0x8ec4
#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70
+
+#define ULP_TX_ASIC_DEBUG_0_A 0x8f74
+#define ULP_TX_ASIC_DEBUG_1_A 0x8f78
+#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c
+#define ULP_TX_ASIC_DEBUG_3_A 0x8f80
+#define ULP_TX_ASIC_DEBUG_4_A 0x8f84
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
#define PMRX_E_PCMD_PAR_ERROR_S 0
#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f1967cf6d43c..5dc6c4154af8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1472,6 +1472,12 @@ enum fw_iq_type {
FW_IQ_TYPE_NO_FL_INT_CAP
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_OTHER,
+ FW_IQ_IQTYPE_NIC,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
+#define FW_IQ_CMD_IQTYPE_S 24
+#define FW_IQ_CMD_IQTYPE_M 0x3
+#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S)
+#define FW_IQ_CMD_IQTYPE_G(x) \
+ (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)
+
#define FW_IQ_CMD_FL0CNGCHMAP_S 20
#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6d7404f66f84..1c9ad3630c77 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -46,6 +46,11 @@
#define DRV_NAME "gmac-gemini"
#define DRV_VERSION "1.0"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
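The new debug parameter is later folded into port->msg_enable via netif_msg_init(debug, DEFAULT_MSG_ENABLE). As a rough model of that helper's behaviour (demo_msg_init() below is a hypothetical stand-in, not the kernel function):

#include <stdio.h>

/* rough userspace model of netif_msg_init(debug, default_bits) */
static unsigned int demo_msg_init(int debug, unsigned int default_bits)
{
	if (debug < 0 || debug >= 32)
		return default_bits;	/* -1 (unset) keeps the defaults */
	if (debug == 0)
		return 0;		/* all messages off */
	return (1u << debug) - 1;	/* enable the lowest 'debug' bits */
}

int main(void)
{
	printf("debug=-1 -> 0x%x\n", demo_msg_init(-1, 0x7)); /* DRV|PROBE|LINK */
	printf("debug=16 -> 0x%x\n", demo_msg_init(16, 0x7));
	return 0;
}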
#define HSIZE_8 0x00
#define HSIZE_16 0x01
#define HSIZE_32 0x02
@@ -146,6 +151,7 @@ struct gemini_ethernet {
void __iomem *base;
struct gemini_ethernet_port *port0;
struct gemini_ethernet_port *port1;
+ bool initialized;
spinlock_t irq_lock; /* Locks IRQ-related registers */
unsigned int freeq_order;
@@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev)
status.bits.speed = GMAC_SPEED_1000;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
- netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
+ phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
+ phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
+ phydev_name(phydev));
break;
default:
- netdev_warn(netdev, "Not supported PHY speed (%d)\n",
- phydev->speed);
+ netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n",
+ phydev->speed, phydev_name(phydev));
}
if (phydev->duplex == DUPLEX_FULL) {
@@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev)
return -ENODEV;
netdev->phydev = phy;
- netdev_info(netdev, "connected to PHY \"%s\"\n",
- phydev_name(phy));
- phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
- (unsigned long)phy->phy_id,
- phy_modes(phy->interface));
-
phy->supported &= PHY_GBIT_FEATURES;
phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
phy->advertising = phy->supported;
@@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev)
/* set PHY interface type */
switch (phy->interface) {
case PHY_INTERFACE_MODE_MII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_MII;
- netdev_info(netdev, "connect to MII\n");
break;
case PHY_INTERFACE_MODE_GMII:
- netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ netdev_dbg(netdev,
+ "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
status.bits.mii_rmii = GMAC_PHY_GMII;
- netdev_info(netdev, "connect to GMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
- dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ netdev_dbg(netdev,
+ "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
- netdev_info(netdev, "connect to RGMII\n");
break;
default:
netdev_err(netdev, "Unsupported MII interface\n");
@@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev)
}
writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ if (netif_msg_link(port))
+ phy_attached_info(phy);
+
return 0;
}
-static int gmac_pick_rx_max_len(int max_l3_len)
-{
- /* index = CONFIG_MAXLEN_XXX values */
- static const int max_len[8] = {
- 1536, 1518, 1522, 1542,
- 9212, 10236, 1518, 1518
- };
- int i, n = 5;
+/* The maximum frame length is not logically enumerated in the
+ * hardware, so we do a table lookup to find the applicable max
+ * frame length.
+ */
+struct gmac_max_framelen {
+ unsigned int max_l3_len;
+ u8 val;
+};
+
+static const struct gmac_max_framelen gmac_maxlens[] = {
+ {
+ .max_l3_len = 1518,
+ .val = CONFIG0_MAXLEN_1518,
+ },
+ {
+ .max_l3_len = 1522,
+ .val = CONFIG0_MAXLEN_1522,
+ },
+ {
+ .max_l3_len = 1536,
+ .val = CONFIG0_MAXLEN_1536,
+ },
+ {
+ .max_l3_len = 1542,
+ .val = CONFIG0_MAXLEN_1542,
+ },
+ {
+ .max_l3_len = 9212,
+ .val = CONFIG0_MAXLEN_9k,
+ },
+ {
+ .max_l3_len = 10236,
+ .val = CONFIG0_MAXLEN_10k,
+ },
+};
- max_l3_len += ETH_HLEN + VLAN_HLEN;
+static int gmac_pick_rx_max_len(unsigned int max_l3_len)
+{
+ const struct gmac_max_framelen *maxlen;
+ int maxtot;
+ int i;
- if (max_l3_len > max_len[n])
- return -1;
+ maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
- for (i = 0; i < 5; i++) {
- if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
- n = i;
+ for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) {
+ maxlen = &gmac_maxlens[i];
+ if (maxtot <= maxlen->max_l3_len)
+ return maxlen->val;
}
- return n;
+ return -1;
}
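To make the table lookup concrete, a standalone sketch with the header sizes written out as plain numbers (14 for ETH_HLEN, 4 for VLAN_HLEN); demo_pick() and demo_maxlens are hypothetical names that mirror the logic above:

#include <stdio.h>

static const unsigned int demo_maxlens[] = { 1518, 1522, 1536, 1542, 9212, 10236 };

static int demo_pick(unsigned int max_l3_len)
{
	unsigned int maxtot = max_l3_len + 14 + 4;	/* ETH_HLEN + VLAN_HLEN */
	unsigned int i;

	for (i = 0; i < sizeof(demo_maxlens) / sizeof(demo_maxlens[0]); i++)
		if (maxtot <= demo_maxlens[i])
			return i;	/* index of matching CONFIG0_MAXLEN_* entry */
	return -1;
}

int main(void)
{
	printf("MTU 1500 -> table entry %d\n", demo_pick(1500));	/* 0: 1518 */
	printf("MTU 9000 -> table entry %d\n", demo_pick(9000));	/* 4: 9212 */
	return 0;
}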
static int gmac_init(struct net_device *netdev)
@@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable)
unsigned long flags;
u32 val, mask;
- netdev_info(netdev, "%s device %d %s\n", __func__,
- netdev->dev_id, enable ? "enable" : "disable");
+ netdev_dbg(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
spin_lock_irqsave(&geth->irq_lock, flags);
mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
@@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev)
phy_start(netdev->phydev);
err = geth_resize_freeq(port);
- if (err) {
+ /* It's fine if it's just busy, the other port has set up
+ * the freeq in that case.
+ */
+ if (err && (err != -EBUSY)) {
netdev_err(netdev, "could not resize freeq\n");
goto err_stop_phy;
}
@@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev)
HRTIMER_MODE_REL);
port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
- netdev_info(netdev, "opened\n");
+ netdev_dbg(netdev, "opened\n");
return 0;
@@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port)
static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
+ /* Only do this once both ports are online */
+ if (geth->initialized)
+ return;
+ if (geth->port0 && geth->port1)
+ geth->initialized = true;
+ else
+ return;
+
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
@@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->id = id;
port->geth = geth;
port->dev = dev;
+ port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
geth->port0 = port;
else
geth->port1 = port;
+
+ /* This will just be done once both ports are up and reset */
+ gemini_ethernet_init(geth);
+
platform_set_drvdata(pdev, port);
/* Set up and register the netdev */
@@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+ /* We can handle jumbo frames up to 10236 bytes, so let's accept
+ * payloads of 10236 bytes minus the VLAN and Ethernet header.
+ */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
@@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
}
gmac_write_mac_address(netdev);
@@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
spin_lock_init(&geth->irq_lock);
spin_lock_init(&geth->freeq_lock);
- gemini_ethernet_init(geth);
/* The children will use this */
platform_set_drvdata(pdev, geth);
@@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
- gemini_ethernet_init(geth);
geth_cleanup_freeq(geth);
+ geth->initialized = false;
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 382891f81e09..7005949dc17b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 eqd; /* configured val when aic is off */
- u32 cur_eqd; /* in usecs */
+ struct be_adapter *adapter;
+ struct napi_struct napi;
u8 idx; /* array index */
u8 msix_idx;
u16 spurious_intr;
- struct napi_struct napi;
- struct be_adapter *adapter;
cpumask_var_t affinity_mask;
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BE_EQ_IDLE 0
-#define BE_EQ_NAPI 1 /* napi owns this EQ */
-#define BE_EQ_POLL 2 /* poll owns this EQ */
-#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL)
-#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */
-#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */
-#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
-#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD)
- unsigned int state;
- spinlock_t lock; /* lock to serialize napi and busy-poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
@@ -238,7 +217,6 @@ struct be_tx_stats {
u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
- ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
/* the error counters are described in be_ethtool.c */
@@ -261,9 +239,9 @@ struct be_tx_compl_info {
struct be_tx_obj {
u32 db_offset;
+ struct be_tx_compl_info txcp;
struct be_queue_info q;
struct be_queue_info cq;
- struct be_tx_compl_info txcp;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats;
@@ -458,10 +436,10 @@ struct be_port_resources {
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
struct rss_info {
- u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 rss_queue[RSS_INDIR_TABLE_LEN];
u8 rss_hkey[RSS_HASH_KEY_LEN];
+ u64 rss_flags;
};
#define BE_INVALID_DIE_TEMP 0xFF
@@ -544,11 +522,13 @@ enum {
};
struct be_error_recovery {
- /* Lancer error recovery variables */
- u8 recovery_retries;
+ union {
+ u8 recovery_retries; /* used for Lancer */
+ u8 recovery_state; /* used for BEx and Skyhawk */
+ };
/* BEx/Skyhawk error recovery variables */
- u8 recovery_state;
+ bool recovery_supported;
u16 ue_to_reset_time; /* Time after UE, to soft reset
* the chip - PF0 only
*/
@@ -556,7 +536,6 @@ struct be_error_recovery {
* of SLIPORT_SEMAPHORE reg
*/
u16 last_err_code;
- bool recovery_supported;
unsigned long probe_time;
unsigned long last_recovery_time;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8f755009ff38..05e4c0bb25f4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3403,9 +3403,11 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+ char irq_name[IFNAMSIZ+4];
+
+ snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+ status = request_irq(vec, be_msix, 0, irq_name, eqo);
if (status)
goto err_msix;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ab02057ac730..65a22cd9aef2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
params.specific_params.non_rx_params.err_fqid = errq->fqid;
@@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
buf_prefix_content.pass_prs_result = true;
buf_prefix_content.pass_hash_result = true;
- buf_prefix_content.pass_time_stamp = false;
+ buf_prefix_content.pass_time_stamp = true;
buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
rx_p = &params.specific_params.rx_params;
@@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
+ struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
const struct qm_sg_entry *sgt;
struct sk_buff **skbh, *skb;
int nr_frags, i;
+ u64 ns;
skbh = (struct sk_buff **)phys_to_virt(addr);
skb = *skbh;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
+ }
+
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_single(dev, addr,
@@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
@@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
+ struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
const struct qm_fd *fd = &dq->fd;
@@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct sk_buff *skb;
int *count_ptr;
void *vaddr;
+ u64 ns;
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
@@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (!skb)
return qman_cb_dqrr_consume;
+ if (priv->rx_tstamp) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ else
+ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+ }
+
skb->protocol = eth_type_trans(skb, net_dev);
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
@@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ /* Rx/Tx timestamping cannot be disabled separately in
+ * hardware, so do nothing here.
+ */
+ priv->tx_tstamp = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->tx_tstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ /* Rx/Tx timestamping cannot be disabled separately in
+ * hardware, so do nothing here.
+ */
+ priv->rx_tstamp = false;
+ } else {
+ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+ priv->rx_tstamp = true;
+ /* TS is set for all frame types, not only those requested */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
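For context, this is the kind of userspace request that lands in dpaa_ts_ioctl(); a hedged sketch using the standard SIOCSHWTSTAMP interface, with "eth0" as a placeholder interface name and minimal error handling:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,	/* the driver reports ALL anyway */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	return 0;
}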
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- if (!net_dev->phydev)
- return -EINVAL;
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ int ret = -EINVAL;
+
+ if (cmd == SIOCGMIIREG) {
+ if (net_dev->phydev)
+ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ }
+
+ if (cmd == SIOCSHWTSTAMP)
+ return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+ return ret;
}
static const struct net_device_ops dpaa_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index bd9422082f83..af320f83c742 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -182,6 +182,9 @@ struct dpaa_priv {
struct dpaa_buffer_layout buf_layout[2];
u16 rx_headroom;
+
+ bool tx_tstamp; /* Tx timestamping enabled */
+ bool rx_tstamp; /* Rx timestamping enabled */
};
/* from dpaa_ethtool.c */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 2f933b6b2f4e..3184c8f7cdd0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -32,6 +32,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
#include "dpaa_eth.h"
#include "mac.h"
@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int dpaa_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *info)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct device_node *mac_node = dev->of_node;
+ struct device_node *fman_node = NULL, *ptp_node = NULL;
+ struct platform_device *ptp_dev = NULL;
+ struct qoriq_ptp *ptp = NULL;
+
+ info->phc_index = -1;
+
+ fman_node = of_get_parent(mac_node);
+ if (fman_node)
+ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+
+ if (ptp_node)
+ ptp_dev = of_find_device_by_node(ptp_node);
+
+ if (ptp_dev)
+ ptp = platform_get_drvdata(ptp_dev);
+
+ if (ptp)
+ info->phc_index = ptp->phc_index;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.set_link_ksettings = dpaa_set_link_ksettings,
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
+ .get_ts_info = dpaa_get_ts_info,
};
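The new .get_ts_info hook answers ETHTOOL_GET_TS_INFO queries (what "ethtool -T" issues). A hedged userspace sketch of that query; "eth0" is a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_GET_TS_INFO");
	else
		printf("phc_index=%d so_timestamping=0x%x\n",
		       info.phc_index, info.so_timestamping);
	return 0;
}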
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 36c2d7d6ee1b..7e892b1cbd3d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
unsigned long flags;
u32 val, tempval;
- int inc;
struct timespec64 ts;
u64 ns;
val = 0;
@@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
- inc = fep->ptp_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 9530405030a7..c415ac67cb7b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
of_node_put(muram_node);
of_node_put(fm_node);
- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+ "fman", fman);
if (err < 0) {
dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
__func__, irq, err);
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index bfa02e0014ae..935c317fa696 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -41,6 +41,7 @@
/* Frame queue Context Override */
#define FM_FD_CMD_FCO 0x80000000
#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
+#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
/* TX-Port: Unsupported Format */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 57b1e2b47c0a..1ca543ac8f2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -123,11 +123,13 @@
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
+#define TCTRL_TTSE 0x00000040
#define TCTRL_GTS 0x00000020
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_GHTX 0x00000400
+#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
@@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 rctrl, tctrl;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ rctrl = ioread32be(&regs->rctrl);
+ tctrl = ioread32be(&regs->tctrl);
+
+ if (enable) {
+ rctrl |= RCTRL_RTSE;
+ tctrl |= TCTRL_TTSE;
+ } else {
+ rctrl &= ~RCTRL_RTSE;
+ tctrl &= ~TCTRL_TTSE;
+ }
+
+ iowrite32be(rctrl, &regs->rctrl);
+ iowrite32be(tctrl, &regs->tctrl);
+
+ return 0;
+}
+
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 1a689adf5a22..5149d96ec2c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 446a97b792e3..bc6eb30aa20f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
+int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+ return 0; /* Always enabled. */
+}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b5a50338ed9a..b2c671ec0ce7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac,
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_set_allmulti(struct fman_mac *memac, bool enable);
+int memac_set_tstamp(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ecbf6187e13a..ee82ee1384eb 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
}
EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+ return -EINVAL;
+
+ *tstamp = be64_to_cpu(*(__be64 *)(data +
+ port->buffer_offsets.time_stamp_offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
+
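The exported helper just pulls a big-endian 64-bit timestamp out of the prepended buffer area. A minimal sketch of that endian handling, using a made-up offset of 8 (the real offset comes from port->buffer_offsets.time_stamp_offset) and glibc's htobe64()/be64toh():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
	unsigned char prepended[16] = { 0 };
	uint64_t raw, ns;

	/* pretend the hardware wrote 1234 ns, big-endian, at offset 8 */
	raw = htobe64(1234);
	memcpy(prepended + 8, &raw, sizeof(raw));

	/* what fman_port_get_tstamp() does with be64_to_cpu() in the kernel */
	memcpy(&raw, prepended + 8, sizeof(raw));
	ns = be64toh(raw);
	printf("timestamp: %llu ns\n", (unsigned long long)ns);
	return 0;
}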
static int fman_port_probe(struct platform_device *of_dev)
{
struct fman_port *port;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index e86ca6a34e4e..9dbb69f40121 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port);
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
struct fman_port *fman_port_bind(struct device *dev);
#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 284735d4ebe9..40705938eecc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -44,6 +44,7 @@
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
/* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP 0x00100000
#define CMD_CFG_NO_LEN_CHK 0x00020000
#define CMD_CFG_PAUSE_IGNORE 0x00000100
#define CMF_CFG_CRC_FWD 0x00000040
@@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+ struct tgec_regs __iomem *regs = tgec->regs;
+ u32 tmp;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->command_config);
+
+ if (enable)
+ tmp |= CMD_CFG_EN_TIMESTAMP;
+ else
+ tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+ iowrite32be(tmp, &regs->command_config);
+
+ return 0;
+}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index cbbd3b422a98..3bfd1062b386 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7b5b95f52c09..a847b9c3b31a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index b520cec120ee..824a81a9f350 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -68,6 +68,7 @@ struct mac_device {
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8cb98cae0a6f..395a5266ea30 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
u64 class)
{
- unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
unsigned int *local_rqfpr;
unsigned int *local_rqfcr;
@@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
}
priv->cur_filer_idx = l - 1;
- last_rule_idx = l;
/* hash rules */
ethflow_to_filer_rules(priv, ethflow);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 42fca3208c0b..22a817da861e 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ugeth_vdbg("%s: IN", __func__);
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
@@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
if (!skb)
break;
-
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
dev_consume_skb_any(skb);
@@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
@@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev)
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index fb1a7251f45d..25152715396b 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -85,10 +85,12 @@ config HNS3
drivers (like ODP) to register with HNAE devices and their associated
operations.
+if HNS3
+
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
depends on PCI_MSI
- depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
- depends on HNS3 && HNS3_HCLGE && DCB
+ depends on HNS3_HCLGE && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
If unsure, say N.
config HNS3_HCLGEVF
- tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
- depends on HNS3
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
depends on HNS3_HCLGE
---help---
This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
depends on 64BIT && PCI
- depends on HNS3
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+endif #HNS3
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 340e28211135..14374a856d30 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ef9ef703d13a..948b3e0d18f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb);
+ return fallback(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 9d79dad2c6aa..0762ad18fdcc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -8,7 +8,6 @@
*/
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/spinlock.h>
#include "hnae3.h"
@@ -41,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
- hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
- hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
@@ -61,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
- inited = hnae_get_bit(ae_dev->flag,
+ inited = hnae3_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
- inited = hnae_get_bit(ae_dev->flag,
- HNAE3_ROCE_CLIENT_INITED_B);
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
@@ -86,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
- hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
@@ -95,7 +94,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) {
dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client\n");
+ "fail to instantiate client, ret = %d\n", ret);
return ret;
}
@@ -135,7 +134,8 @@ int hnae3_register_client(struct hnae3_client *client)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed for port\n");
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
}
exit:
@@ -185,11 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ae_dev->ops = ae_algo->ops;
ret = ae_algo->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
continue;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
/* check the client list for the match with this ae_dev type and
* initialize the matching client instance
@@ -198,7 +199,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
}
@@ -218,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -232,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_algo->node);
@@ -271,11 +273,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
/* ae_dev init should set flag */
ret = ae_dev->ops->init_ae_dev(ae_dev);
if (ret) {
- dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
goto out_err;
}
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
break;
}
@@ -286,7 +289,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
- "match and instantiation failed\n");
+ "match and instantiation failed, ret = %d\n",
+ ret);
}
out_err:
@@ -306,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
- if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
continue;
id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -317,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
- hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}
list_del(&ae_dev->node);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8acb1d116a02..da806fdfbbe6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -62,10 +62,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +167,6 @@ struct hnae3_client_ops {
#define HNAE3_CLIENT_NAME_LENGTH 16
struct hnae3_client {
char name[HNAE3_CLIENT_NAME_LENGTH];
- u16 version;
unsigned long state;
enum hnae3_client_type type;
const struct hnae3_client_ops *ops;
@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
struct hnae3_ae_algo {
const struct hnae3_ae_ops *ops;
struct list_head node;
- char name[HNAE3_CLASS_NAME_SIZE];
const struct pci_device_id *pdev_id_table;
};
@@ -509,17 +507,17 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
};
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= ((val) << (shift)) & (mask); \
} while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
-#define hnae_set_bit(origin, shift, val) \
- hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
- hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
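A small standalone sketch of what the renamed field helpers do on a plain 32-bit word; the mask, shift and values below are made up:

#include <stdio.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	unsigned int desc = 0;

	hnae3_set_field(desc, 0xf0u, 4, 0x9u);	/* write 0x9 into bits 7:4 */
	printf("desc=0x%x field=%u\n", desc, hnae3_get_field(desc, 0xf0u, 4));
	return 0;
}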
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25a73bb2e642..29be96e5cc47 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
- int ret;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
@@ -312,7 +333,9 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
netif_carrier_off(netdev);
@@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ netdev_set_prio_tc_map(netdev, i,
+ kinfo->prio_tc[i]);
+ }
+
priv->ae_handle->last_reset_time = jiffies;
return 0;
}
@@ -493,8 +522,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* find the txbd field values */
*paylen = skb->len - hdr_len;
- hnae_set_bit(*type_cs_vlan_tso,
- HNS3_TXD_TSO_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso,
+ HNS3_TXD_TSO_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +615,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute L2 header size for normal packet, defined in 2 Bytes */
l2_len = l3.hdr - skb->data;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
/* tunnel packet*/
if (skb->encapsulation) {
/* compute OL2 header size, defined in 2 Bytes */
ol2_len = l2_len;
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, ol2_len >> 1);
/* compute OL3 header size, defined in 4 Bytes */
ol3_len = l4.hdr - l3.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, ol3_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, ol3_len >> 2);
/* MAC in UDP, MAC in GRE (0x6558)*/
if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +638,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute OL4 header size, defined in 4 Bytes. */
ol4_len = l2_hdr - l4.hdr;
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ ol4_len >> 2);
/* switch IP header ptr from outer to inner header */
l3.hdr = skb_inner_network_header(skb);
/* compute inner l2 header size, defined in 2 Bytes. */
l2_len = l3.hdr - l2_hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
- HNS3_TXD_L2LEN_S, l2_len >> 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+ HNS3_TXD_L2LEN_S, l2_len >> 1);
} else {
/* skb packet types not supported by hardware,
* txbd len field is not filled.
@@ -634,22 +664,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
/* compute inner(/normal) L3 header size, defined in 4 Bytes */
l3_len = l4.hdr - l3.hdr;
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
- HNS3_TXD_L3LEN_S, l3_len >> 2);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+ HNS3_TXD_L3LEN_S, l3_len >> 2);
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, l4.tcp->doff);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S, l4.tcp->doff);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
case IPPROTO_UDP:
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
- HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+ HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
default:
/* skb packet types not supported by hardware,
@@ -703,32 +735,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
/* define outer network header type.*/
if (skb->protocol == htons(ETH_P_IP)) {
if (skb_is_gso(skb))
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
else
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
- HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+ hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+ HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
}
/* define tunnel type(OL4).*/
switch (l4_proto) {
case IPPROTO_UDP:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
break;
case IPPROTO_GRE:
- hnae_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_M,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
+ hnae3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_TUNTYPE_M,
+ HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -749,43 +783,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
}
if (l3.v4->version == 4) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
if (skb_is_gso(skb))
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
} else if (l3.v6->version == 6) {
- hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
- HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
- hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+ HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
}
switch (l4_proto) {
case IPPROTO_TCP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_TCP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
break;
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_UDP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
break;
case IPPROTO_SCTP:
- hnae_set_field(*type_cs_vlan_tso,
- HNS3_TXD_L4T_M,
- HNS3_TXD_L4T_S,
- HNS3_L4T_SCTP);
+ hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hnae3_set_field(*type_cs_vlan_tso,
+ HNS3_TXD_L4T_M,
+ HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
break;
default:
/* drop the skb tunnel packet if hardware doesn't support,
@@ -807,11 +841,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_S, 0);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
- hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+ HNS3_TXD_BDTYPE_S, 0);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +878,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
- hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +914,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
- __be16 protocol;
u8 ol4_proto;
u8 il4_proto;
int ret;
@@ -909,7 +942,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
- protocol = skb->protocol;
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
@@ -1135,7 +1167,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
wmb(); /* Commit all data before submit */
- hnae_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, buf_num);
return NETDEV_TX_OK;
@@ -1304,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw;
bool if_running;
- unsigned int i;
int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
goto out;
- if (tc <= 1) {
- netdev_reset_tc(netdev);
- } else {
- ret = netdev_set_num_tc(netdev, tc);
- if (ret)
- goto out;
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!kinfo->tc_info[i].enable)
- continue;
-
- netdev_set_tc_queue(netdev,
- kinfo->tc_info[i].tc,
- kinfo->tc_info[i].tqp_count,
- kinfo->tc_info[i].tqp_offset);
- }
- }
-
ret = hns3_nic_set_real_num_queue(netdev);
out:
@@ -1703,7 +1716,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae_page_order(ring);
+ unsigned int order = hnae3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -1714,7 +1727,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae_page_size(ring);
+ cb->length = hnae3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -1780,33 +1793,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
hns3_free_buffers(ring);
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
int size = ring->desc_num * sizeof(ring->desc[0]);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
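Switching the descriptor ring from kzalloc() + dma_map_single() to dma_zalloc_coherent() gives a zeroed buffer whose bus address comes back directly, so the dma_mapping_error() check and the DMA_BIDIRECTIONAL unmap disappear. A minimal sketch of the same pattern (demo_ring is a stand-in, not a driver structure):

    struct demo_ring {
    	void *desc;
    	dma_addr_t desc_dma_addr;
    	int desc_num;
    };

    static int demo_alloc_desc(struct device *dev, struct demo_ring *ring, size_t elem)
    {
    	size_t size = ring->desc_num * elem;

    	ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
    					 GFP_KERNEL);
    	return ring->desc ? 0 : -ENOMEM;
    }

    static void demo_free_desc(struct device *dev, struct demo_ring *ring, size_t elem)
    {
    	if (ring->desc) {
    		dma_free_coherent(dev, ring->desc_num * elem,
    				  ring->desc, ring->desc_dma_addr);
    		ring->desc = NULL;
    	}
    }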
@@ -1887,7 +1894,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1924,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
if (is_ring_empty(ring) || head == ring->next_to_clean)
return true; /* no data to poll */
- if (!is_valid_clean_head(ring, head)) {
+ if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2016,15 +2023,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
bool twobufs;
twobufs = ((PAGE_SIZE < 8192) &&
- hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+ hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
desc = &ring->desc[ring->next_to_clean];
size = le16_to_cpu(desc->rx.size);
- truesize = hnae_buf_size(ring);
+ truesize = hnae3_buf_size(ring);
if (!twobufs)
- last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2076,13 +2083,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
/* check if hardware has done checksum */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
- hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+ hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
@@ -2091,23 +2098,24 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
- l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
- HNS3_RXD_L4ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
- ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
case HNS3_OL4_TYPE_NVGRE:
skb->csum_level = 1;
case HNS3_OL4_TYPE_NO_TUN:
/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
- if (l3_type == HNS3_L3_TYPE_IPV4 ||
- (l3_type == HNS3_L3_TYPE_IPV6 &&
- (l4_type == HNS3_L4_TYPE_UDP ||
- l4_type == HNS3_L4_TYPE_TCP ||
- l4_type == HNS3_L4_TYPE_SCTP)))
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
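The reworked condition reports CHECKSUM_UNNECESSARY only when both the L3 type (IPv4 or IPv6) and the L4 type (UDP, TCP or SCTP) are ones the hardware verified; previously any IPv4 packet qualified regardless of its L4 protocol. Illustrative effect:

    /* IPv4 + TCP  : unchanged, skb->ip_summed = CHECKSUM_UNNECESSARY
     * IPv6 + UDP  : unchanged, skb->ip_summed = CHECKSUM_UNNECESSARY
     * IPv4 + ICMP : was CHECKSUM_UNNECESSARY, now left untouched
     */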
@@ -2135,8 +2143,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
- switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
- HNS3_RXD_STRP_TAGP_S)) {
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
@@ -2174,7 +2182,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Check valid BD */
- if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2237,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
- while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2265,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
vlan_tag);
}
- if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2277,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely((!desc->rx.pkt_len) ||
- hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+ hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
@@ -2279,7 +2287,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return -EFAULT;
}
- if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+ if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
@@ -2532,10 +2540,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
tx_ring = tqp_vector->tx_group.ring;
if (tx_ring) {
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2549,12 +2557,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2564,10 +2572,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
if (!tx_ring && rx_ring) {
cur_chain->next = NULL;
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2579,10 +2587,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->next = chain;
chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
cur_chain = chain;
@@ -2745,10 +2753,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
if (ret)
return ret;
- ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
- if (ret)
- return ret;
-
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2813,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->io_base = q->io_base;
}
- hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring->tqp = q;
ring->desc = NULL;
@@ -3081,7 +3085,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->reset_level = HNAE3_NONE_RESET;
priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
@@ -3102,6 +3105,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ if (handle->flags & HNAE3_SUPPORT_VF)
+ handle->reset_level = HNAE3_VF_RESET;
+ else
+ handle->reset_level = HNAE3_FUNC_RESET;
+
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3208,7 +3216,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev;
bool if_running;
int ret;
- u8 i;
if (tc > HNAE3_MAX_TC)
return -EINVAL;
@@ -3218,10 +3225,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev);
- ret = netdev_set_num_tc(ndev, tc);
- if (ret)
- return ret;
-
if (if_running) {
(void)hns3_nic_net_stop(ndev);
msleep(100);
@@ -3232,27 +3235,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret)
goto err_out;
- if (tc <= 1) {
- netdev_reset_tc(ndev);
- goto out;
- }
-
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
- if (tc_info->enable)
- netdev_set_tc_queue(ndev,
- tc_info->tc,
- tc_info->tqp_count,
- tc_info->tqp_offset);
- }
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(ndev, i,
- kinfo->prio_tc[i]);
- }
-
-out:
ret = hns3_nic_set_real_num_queue(ndev);
err_out:
@@ -3418,7 +3400,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
struct net_device *ndev = kinfo->netdev;
if (!netif_running(ndev))
- return -EIO;
+ return 0;
return hns3_nic_net_stop(ndev);
}
@@ -3458,10 +3440,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- ret = hns3_get_ring_config(priv);
- if (ret)
- return ret;
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3493,10 +3471,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_put_ring_config(priv);
-
- priv->ring_data = NULL;
-
hns3_uninit_mac_addr(netdev);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 3b083d5ae9ce..bf9aa02be994 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {
u16 num_tqps; /* total number of tqps in TQP vector */
- cpumask_t affinity_mask;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 40c0425b4023..11620e003a8e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
rx_group = &ring->tqp_vector->rx_group;
pre_rx_pkt = rx_group->total_packets;
+ preempt_disable();
hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
rx_group->total_packets = pre_rx_pkt;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c36d64710fa6..cf40afca66db 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -18,8 +18,7 @@
#include "hclge_main.h"
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
- DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
+ int size = ring->desc_num * sizeof(struct hclge_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -123,24 +113,24 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
if (ring->flag == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- (u32)dma);
+ lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- (u32)((dma >> 31) >> 1));
+ upper_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
HCLGE_NIC_CMQ_ENABLE);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
}
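upper_32_bits()/lower_32_bits() make the split of the 64-bit DMA address across the two base-address registers explicit (the old (dma >> 31) >> 1 form worked around shifting a possibly 32-bit dma_addr_t by 32). Sketch with hypothetical register offsets:

    static void demo_write_base_addr(void __iomem *base, dma_addr_t dma)
    {
    	writel(lower_32_bits(dma), base + DEMO_BASEADDR_L_REG);	/* bits 31:0  */
    	writel(upper_32_bits(dma), base + DEMO_BASEADDR_H_REG);	/* bits 63:32 */
    }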
@@ -152,33 +142,22 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclge_desc *desc;
- int clean = 0;
u32 head;
+ int clean;
- desc = &csq->desc[ntc];
head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
return 0;
}
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
- }
- csq->next_to_clean = ntc;
-
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
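The new clean count is plain modular ring arithmetic instead of a walk-and-memset loop. Worked example, assuming a 1024-entry CSQ:

    /* next_to_clean = 1020, head read back from hardware = 4
     * clean = (4 - 1020 + 1024) % 1024 = 8   -> 8 descriptors reclaimed
     * next_to_clean then jumps straight to head.
     */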
@@ -216,7 +195,7 @@ static bool hclge_is_special_opcode(u16 opcode)
**/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
- struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_desc *desc_to_use;
bool complete = false;
u32 timeout = 0;
@@ -227,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclge_ring_space(&hw->cmq.csq)) {
+ if (num > hclge_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -256,33 +236,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
*/
if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
- if (hclge_cmd_csq_done(hw))
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
break;
+ }
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
}
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
+ if (!complete) {
+ retval = -EAGAIN;
+ } else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
- pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
- if ((enum hclge_cmd_return_status)desc_ret ==
- HCLGE_CMD_EXEC_SUCCESS)
+ if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else
retval = -EIO;
- hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+ hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
@@ -290,9 +271,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
}
}
- if (!complete)
- retval = -EAGAIN;
-
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
if (handle != num) {
@@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
spin_lock_init(&hdev->hw.cmq.crq.lock);
hclge_cmd_init_regs(&hdev->hw);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d9aaa76c76eb..656c3e622ec8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd {
struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
- u8 rsv[22];
+ u8 min_frm_size;
+ u8 rsv[21];
};
enum hclge_mac_vlan_tbl_opcode {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d318d35e598f..266c68607e53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->num_roce_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
} else {
hdev->num_msi =
- hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
}
return 0;
@@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
- cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_VMDQ_M,
- HCLGE_CFG_VMDQ_S);
- cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
- cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
- HCLGE_CFG_TQP_DESC_N_M,
- HCLGE_CFG_TQP_DESC_N_S);
-
- cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_PHY_ADDR_M,
- HCLGE_CFG_PHY_ADDR_S);
- cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_MEDIA_TP_M,
- HCLGE_CFG_MEDIA_TP_S);
- cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_RX_BUF_LEN_M,
- HCLGE_CFG_RX_BUF_LEN_S);
+ cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_VMDQ_M,
+ HCLGE_CFG_VMDQ_S);
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
/* get mac_address */
mac_addr_tmp = __le32_to_cpu(req->param[2]);
- mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_MAC_ADDR_H_M,
- HCLGE_CFG_MAC_ADDR_H_S);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
- cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_DEFAULT_SPEED_M,
- HCLGE_CFG_DEFAULT_SPEED_S);
- cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
- HCLGE_CFG_RSS_SIZE_M,
- HCLGE_CFG_RSS_SIZE_S);
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
- cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
- HCLGE_CFG_SPEED_ABILITY_M,
- HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
- HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Len should be in units of 4 bytes when sent to hardware */
- hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
- HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
req->offset = cpu_to_le32(offset);
}
@@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev)
/* Non-contiguous tc is currently not supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae_set_bit(hdev->hw_tc_map, i, 1);
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
@@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_min);
req->tso_mss_min = cpu_to_le16(tso_mss);
tso_mss = 0;
- hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
- HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+ HCLGE_TSO_MSS_MIN_S, tso_mss_max);
req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
return 0;
}
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
@@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
@@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
- HCLGE_RX_PRIV_EN_B);
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
@@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
- req->com_wl.high |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
- req->com_wl.low |=
- cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
- HCLGE_RX_PRIV_EN_B);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2118,48 +2108,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
- hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 6);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 6);
break;
case HCLGE_MAC_SPEED_100M:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 7);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 7);
break;
case HCLGE_MAC_SPEED_1G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 0);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 0);
break;
case HCLGE_MAC_SPEED_10G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 1);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 1);
break;
case HCLGE_MAC_SPEED_25G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 2);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 2);
break;
case HCLGE_MAC_SPEED_40G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 3);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 3);
break;
case HCLGE_MAC_SPEED_50G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 4);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 4);
break;
case HCLGE_MAC_SPEED_100G:
- hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, 5);
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 5);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
}
- hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
- 1);
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2201,9 +2191,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return ret;
}
- *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
+ *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+ speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+ HCLGE_QUERY_SPEED_S);
ret = hclge_parse_speed(speed_tmp, speed);
if (ret) {
@@ -2225,7 +2215,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
- hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2269,8 +2259,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
- hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
+ hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
ether_addr_copy(req->mac_mask, mac_mask);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2505,7 +2495,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
u32 cmdq_src_reg;
/* fetch the events from their corresponding regs */
- rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
/* Assumption: If by any chance reset and mailbox events are reported
@@ -2517,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
@@ -2614,6 +2606,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -2705,7 +2703,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
}
val = hclge_read_dev(&hdev->hw, reg);
- while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
val = hclge_read_dev(&hdev->hw, reg);
cnt++;
@@ -2727,8 +2725,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
- hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
req->fun_reset_vfid = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2747,13 +2744,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_CORE_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
- hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+ hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
dev_info(&pdev->dev, "Core Reset requested\n");
break;
@@ -2810,8 +2807,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break;
default:
- dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
- hdev->reset_type);
break;
}
@@ -2824,16 +2819,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
- /* perform reset of the stack & ae device for a client */
+ struct hnae3_handle *handle;
+ /* perform reset of the stack & ae device for a client */
+ handle = &hdev->vport[0].nic;
+ rtnl_lock();
hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
hclge_reset_ae_dev(hdev->ae_dev);
hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- rtnl_unlock();
hclge_clear_reset_cause(hdev);
} else {
@@ -2843,6 +2839,8 @@ static void hclge_reset(struct hclge_dev *hdev)
}
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ handle->last_reset_time = jiffies;
+ rtnl_unlock();
}
static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2855,8 +2853,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* know this if last reset request did not occur very recently (watchdog
* timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
* In case of new request we reset the "reset level" to PF reset.
+ * And if it is a repeat reset request of the most recent one then we
+ * want to make sure we throttle the reset request. Therefore, we will
+ * not allow it again before 3*HZ times.
*/
- if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+ if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ return;
+ else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
handle->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2868,8 +2871,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
if (handle->reset_level < HNAE3_GLOBAL_RESET)
handle->reset_level++;
-
- handle->last_reset_time = jiffies;
}
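With the added throttle, a repeat reset request within 3*HZ of the last handled one is dropped, while the existing 4*5*HZ check still de-escalates stale requests back to a PF-level reset. Timeline sketch (1 second == HZ jiffies):

    /* t = 0s   reset handled, last_reset_time = jiffies
     * t = 2s   new request: time_before(jiffies, last + 3*HZ)  -> dropped
     * t = 25s  new request: time_after(jiffies, last + 20*HZ)  -> handled,
     *          reset_level falls back to HNAE3_FUNC_RESET first
     */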
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -3110,11 +3111,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u16 mode = 0;
- hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+ HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+ HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
@@ -3491,16 +3492,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
i = 0;
for (node = ring_chain; node; node = node->next) {
tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S));
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3648,20 +3649,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3689,7 +3690,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3953,10 +3954,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
- hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+ hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+ enable);
+ hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+ HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3980,8 +3981,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+ enable);
req->function_id = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4007,10 +4008,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+ hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+ hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ HCLGE_CFG_MTA_ITEM_IDX_S, idx);
req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4257,17 +4258,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
- HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
- HCLGE_MAC_EPORT_PFID_S, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
req.egress_port = cpu_to_le16(egress_port);
@@ -4318,8 +4312,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
@@ -4351,10 +4345,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4418,10 +4412,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4802,19 +4796,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
- vcfg->accept_tag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
- vcfg->accept_untag1 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
- vcfg->accept_tag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
- vcfg->accept_untag2 ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
- vcfg->insert_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
- vcfg->insert_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4840,14 +4834,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
- vcfg->strip_tag1_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
- vcfg->strip_tag2_en ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
- vcfg->vlan1_vlan_prionly ? 1 : 0);
- hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
- vcfg->vlan2_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] =
@@ -4999,6 +4993,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -5043,7 +5038,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
- hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -5073,7 +5068,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5380,12 +5375,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
- mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
- HCLGE_PHY_MDIX_CTRL_S);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
- mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
- is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
@@ -5531,7 +5526,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->back = hdev;
hw->io_base = pcim_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5562,6 +5556,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pci_disable_device(pdev);
}
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -5702,12 +5720,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
- set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ hclge_state_init(hdev);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -5812,16 +5825,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
- set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
+ hclge_state_uninit(hdev);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
@@ -6149,8 +6153,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
- hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
- HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -6280,7 +6284,6 @@ static const struct hnae3_ae_ops hclge_ops = {
static struct hnae3_ae_algo ae_algo = {
.ops = &hclge_ops,
- .name = HCLGE_NAME,
.pdev_id_table = ae_algo_pci_tbl,
};
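For reference, the hnae3_* accessors that replace the old hnae_* names throughout the hunks above are plain mask/shift helpers. A rough sketch of their shape follows (the authoritative definitions live in hnae3.h and may differ in detail from this form):

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field(origin, 0x1 << (shift), shift, val)

#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field(origin, 0x1 << (shift), shift)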
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7488534528cd..a5abf8ee9b96 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -89,6 +89,7 @@
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0x0
#define HCLGE_CORE_RESET_BIT 0x1
@@ -128,6 +129,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_MAX
};
@@ -190,7 +192,6 @@ struct hclge_hw {
int num_vec;
struct hclge_cmq cmq;
struct hclge_caps caps;
- void *back;
};
/* TQP stats */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 7541cb9b71ce..f34851c91eb3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
}
}
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
* msg[0]: opcode
* msg[1]: <not relevant to this function>
* msg[2]: ring_num
* msg[3]: first ring type (TX|RX)
* msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
*/
static int hclge_get_ring_chain_from_mbx(
struct hclge_mbx_vf_to_pf_cmd *req,
@@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM;
- hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
- hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[5]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[5]);
cur_chain = ring_chain;
@@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain)
goto err;
- hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
- hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
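The layout documented in the hclge_get_ring_chain_from_mbx comment above packs one (ring type, tqp id, int_gl idx) triple per ring after the fixed header words. A sketch of the index arithmetic, assuming HCLGE_MBX_RING_MAP_BASIC_MSG_NUM and HCLGE_MBX_RING_NODE_VARIABLE_NUM are both 3 (their values in hclge_mbx.h at the time of writing, not something this patch defines):

/* Offset of the i-th ring's triple inside req->msg[] (illustrative). */
static inline int ring_triple_offset(int i)
{
	return HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
	       HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;	/* 3, 6, 9, ... */
}
/* msg[off] = ring type, msg[off + 1] = tqp id, msg[off + 2] = int_gl idx */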
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9f7932e423b5..b6cfe6ff988d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data);
@@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
- hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
- HCLGE_MDIO_PHYID_S, phyid);
- hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
- HCLGE_MDIO_PHYREG_S, regnum);
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, regnum);
- hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
- HCLGE_MDIO_CTRL_ST_S, 1);
- hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
- HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret;
}
- if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 262c125f8137..e2acf3bd6ba3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
- sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
- HCLGE_BP_SUB_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
@@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
tx_en = true;
rx_en = true;
break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
default:
tx_en = true;
rx_en = true;
@@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
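In the hclge_tm.c back-pressure hunk above, each qset id is split into a group index and a bit position within that group's bitmap. A sketch of the mapping, assuming a 5-bit sub-group field (32 qsets per group), which is how the HCLGE_BP_GRP_ID/SUB_GRP_ID masks are laid out at the time of writing:

/* e.g. qs_id 37 -> grp 1, sub_grp 5, so group 1's bitmap gets bit 5 set */
static inline void qs_id_to_bp_pos(u16 qs_id, u8 *grp, u8 *sub_grp)
{
	*grp = qs_id >> 5;		/* HCLGE_BP_GRP_ID_M/S */
	*sub_grp = qs_id & 0x1f;	/* HCLGE_BP_SUB_GRP_ID_M/S */
}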
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index c2b6e8a6700f..c82d49ebd5bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd {
};
#define hclge_tm_set_field(dest, string, val) \
- hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH), val)
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 1bbfe131b596..fb471fe2c494 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+ size, &ring->desc_dma_addr,
+ GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
return 0;
}
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
- dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- hclgevf_ring_to_dma_dir(ring));
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
}
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
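The hclgevf command-queue change above replaces kzalloc() plus dma_map_single() with a single dma_zalloc_coherent() call, which hands back both the CPU and DMA addresses and removes the separate mapping-error path. A minimal sketch of the same pattern on a hypothetical ring (struct and function names are illustrative, not the driver's):

struct my_ring {
	struct device *dev;
	int desc_num;
	void *desc;
	dma_addr_t desc_dma_addr;
};

static int my_ring_alloc(struct my_ring *ring, size_t desc_size)
{
	size_t size = ring->desc_num * desc_size;

	/* One call allocates zeroed, DMA-coherent memory and fills in
	 * both the CPU virtual address and the device (DMA) address.
	 */
	ring->desc = dma_zalloc_coherent(ring->dev, size,
					 &ring->desc_dma_addr, GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void my_ring_free(struct my_ring *ring, size_t desc_size)
{
	if (ring->desc) {
		dma_free_coherent(ring->dev, ring->desc_num * desc_size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}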
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a17872aab168..d1f16f0c1646 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
hdev->num_msi_left += 1;
hdev->num_msi_used -= 1;
@@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle)
}
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
- int vector,
+ int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_ring_chain_node *node;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclgevf_desc desc;
- int i = 0, vector_id;
+ int i = 0;
int status;
u8 type;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- vector_id = hclgevf_get_vector_index(hdev, vector);
- if (vector_id < 0) {
- dev_err(&handle->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
- return vector_id;
- }
for (node = ring_chain; node; node = node->next) {
int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
}
req->msg[idx_offset] =
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+ req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
@@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
- return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}
static int hclgevf_unmap_ring_from_vector(
@@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector(
return vector_id;
}
- ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vector=%d, ret =%d\n",
@@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
- hclgevf_free_vector(hdev, vector);
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
return 0;
}
@@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+ while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+ (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++;
@@ -1582,9 +1601,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
-static int hclgevf_init_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
int ret;
switch (client->type) {
@@ -1635,9 +1655,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return 0;
}
-static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
- struct hnae3_client *client)
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
/* un-init roce, if it exists */
if (hdev->roce_client)
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
@@ -1648,22 +1670,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
client->ops->uninit_instance(&hdev->nic, 0);
}
-static int hclgevf_register_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- return hclgevf_init_instance(hdev, client);
-}
-
-static void hclgevf_unregister_client(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
-{
- struct hclgevf_dev *hdev = ae_dev->priv;
-
- hclgevf_uninit_instance(hdev, client);
-}
-
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -1924,8 +1930,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
- .init_client_instance = hclgevf_register_client,
- .uninit_client_instance = hclgevf_unregister_client,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
@@ -1962,7 +1968,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops,
- .name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index b598c06af8e0..e9d5a4f96304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 79b567447084..6b19607a4caa 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_cmd_fw_ctxt fw_ctxt;
- struct hinic_pfhwdev *pfhwdev;
u16 out_size;
int err;
@@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
&fw_ctxt, sizeof(fw_ctxt),
&fw_ctxt, &out_size);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c944bd10b03d..51762428b40e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
- np, np);
+ np, np, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
return 0;
@@ -11841,7 +11841,6 @@ static int i40e_xdp(struct net_device *dev,
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
default:
@@ -11978,7 +11977,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
IFNAMSIZ - 4,
pf->vsi[pf->lan_vsi]->netdev->name);
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index a7b87f935411..5906c1c1d19d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
adapter);
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 252440a418dc..8a28f3388f69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1048,6 +1048,22 @@
#define E1000_TQAVCTRL_XMIT_MODE BIT(0)
#define E1000_TQAVCTRL_DATAFETCHARB BIT(4)
#define E1000_TQAVCTRL_DATATRANARB BIT(8)
+#define E1000_TQAVCTRL_DATATRANTIM BIT(9)
+#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10)
+/* Fetch Time Delta - bits 31:16
+ *
+ * This field holds the value to be reduced from the launch time for
+ * fetch time decision. The FetchTimeDelta value is defined in 32 ns
+ * granularity.
+ *
+ * This field is 16 bits wide, and so the maximum value is:
+ *
+ * 65535 * 32 = 2097120 ~= 2.1 msec
+ *
+ * XXX: We are configuring the max value here since we couldn't come up
+ * with a reason for not doing so.
+ */
+#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16)
/* TX Qav Credit Control fields */
#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF
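As the FetchTimeDelta comment above notes, the field occupies bits 31:16 of TQAVCTRL in 32 ns units, so the all-ones value programmed by the driver corresponds to roughly 2.1 ms. A small decoding sketch (illustrative helper, not part of the driver):

static inline u32 tqavctrl_fetch_time_delta_ns(u32 tqavctrl)
{
	u32 delta = (tqavctrl & E1000_TQAVCTRL_FETCHTIME_DELTA) >> 16;

	return delta * 32;	/* max 65535 * 32 = 2097120 ns (~2.1 ms) */
}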
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9643b5b3d444..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -262,6 +262,7 @@ struct igb_ring {
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
+ bool launchtime_enable; /* true if LaunchTime is enabled */
bool cbs_enable; /* indicates if CBS is enabled */
s32 idleslope; /* idleSlope in kbps */
s32 sendslope; /* sendSlope in kbps */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f707709969ac..e3a0c02721c9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1654,33 +1654,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
wr32(E1000_I210_TQAVCC(queue), val);
}
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool is_any_txtime_enabled(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i]->launchtime_enable)
+ return true;
+ }
+
+ return false;
+}
+
/**
- * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
* @adapter: pointer to adapter struct
* @queue: queue number
- * @enable: true = enable CBS, false = disable CBS
- * @idleslope: idleSlope in kbps
- * @sendslope: sendSlope in kbps
- * @hicredit: hiCredit in bytes
- * @locredit: loCredit in bytes
*
- * Configure CBS for a given hardware queue. When disabling, idleslope,
- * sendslope, hicredit, locredit arguments are ignored. Returns 0 if
- * success. Negative otherwise.
+ * Configure CBS and Launchtime for a given hardware queue.
+ * Parameters are retrieved from the correct Tx ring, so
+ * igb_save_cbs_params() and igb_save_txtime_params() should be used
+ * for setting those correctly prior to this function being called.
**/
-static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
- bool enable, int idleslope, int sendslope,
- int hicredit, int locredit)
+static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
+ struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- u32 tqavcc;
+ u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
- if (enable || queue == 0) {
+ /* If any of the Qav features is enabled, configure queues as SR and
+ * with HIGH PRIO. If none is, then configure them with LOW PRIO and
+ * as SP.
+ */
+ if (ring->cbs_enable || ring->launchtime_enable) {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ } else {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+ set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+ }
+
+ /* If CBS is enabled, set DataTranARB and config its parameters. */
+ if (ring->cbs_enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
@@ -1690,14 +1722,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
- if (queue == 0 && !enable) {
+ if (queue == 0 && !ring->cbs_enable) {
/* max "linkspeed" idleslope in kbps */
- idleslope = 1000000;
- hicredit = ETH_FRAME_LEN;
+ ring->idleslope = 1000000;
+ ring->hicredit = ETH_FRAME_LEN;
}
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
- set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ /* Always set data transfer arbitration to credit-based
+ * shaper algorithm on TQAVCTRL if CBS is enabled for any of
+ * the queues.
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
/* According to i210 datasheet section 7.2.7.7, we should set
* the 'idleSlope' field from TQAVCC register following the
@@ -1756,17 +1793,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* calculated value, so the resulting bandwidth might
* be slightly higher for some configurations.
*/
- value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+ value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
tqavcc = rd32(E1000_I210_TQAVCC(queue));
tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
tqavcc |= value;
wr32(E1000_I210_TQAVCC(queue), tqavcc);
- wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+ wr32(E1000_I210_TQAVHC(queue),
+ 0x80000000 + ring->hicredit * 0x7735);
} else {
- set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
- set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
/* Set idleSlope to zero. */
tqavcc = rd32(E1000_I210_TQAVCC(queue));
@@ -1775,6 +1811,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
/* Set hiCredit to zero. */
wr32(E1000_I210_TQAVHC(queue), 0);
+
+ /* If CBS is not enabled for any queues anymore, then return to
+ * the default state of Data Transmission Arbitration on
+ * TQAVCTRL.
+ */
+ if (!is_any_cbs_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
+ }
+
+ /* If LaunchTime is enabled, set DataTranTIM. */
+ if (ring->launchtime_enable) {
+ /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
+ * for any of the SR queues, and configure fetchtime delta.
+ * XXX NOTE:
+ * - LaunchTime will be enabled for all SR queues.
+ * - A fixed offset can be added relative to the launch
+ * time of all packets if configured at reg LAUNCH_OS0.
+ * We are keeping it as 0 for now (default value).
+ */
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
+ E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ } else {
+ /* If Launchtime is not enabled for any SR queues anymore,
+ * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
+ * effectively disabling Launchtime.
+ */
+ if (!is_any_txtime_enabled(adapter)) {
+ tqavctrl = rd32(E1000_I210_TQAVCTRL);
+ tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
+ tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
+ wr32(E1000_I210_TQAVCTRL, tqavctrl);
+ }
}
/* XXX: In i210 controller the sendSlope and loCredit parameters from
@@ -1782,9 +1855,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
- (enable) ? "enabled" : "disabled", queue,
- idleslope, sendslope, hicredit, locredit);
+ netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
+ idleslope %d sendslope %d hiCredit %d \
+ locredit %d\n",
+ (ring->cbs_enable) ? "enabled" : "disabled",
+ (ring->launchtime_enable) ? "enabled" : "disabled", queue,
+ ring->idleslope, ring->sendslope, ring->hicredit,
+ ring->locredit);
+}
+
+static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
+ bool enable)
+{
+ struct igb_ring *ring;
+
+ if (queue < 0 || queue > adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+ ring->launchtime_enable = enable;
+
+ return 0;
}
static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
@@ -1807,21 +1898,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
return 0;
}
-static bool is_any_cbs_enabled(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = adapter->tx_ring[i];
-
- if (ring->cbs_enable)
- return true;
- }
-
- return false;
-}
-
+/**
+ * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
+ * @adapter: pointer to adapter struct
+ *
+ * Configure TQAVCTRL register switching the controller's Tx mode
+ * if FQTSS mode is enabled or disabled. Additionally, will issue
+ * a call to igb_config_tx_modes() per queue so any previously saved
+ * Tx parameters are applied.
+ **/
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1836,11 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
int i, max_queue;
/* Configure TQAVCTRL register: set transmit mode to 'Qav',
- * set data fetch arbitration to 'round robin' and set data
- * transfer arbitration to 'credit shaper algorithm.
+ * set data fetch arbitration to 'round robin', set SP_WAIT_SR
+ * so SP queues wait for SR ones.
*/
val = rd32(E1000_I210_TQAVCTRL);
- val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+ val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
val &= ~E1000_TQAVCTRL_DATAFETCHARB;
wr32(E1000_I210_TQAVCTRL, val);
@@ -1881,11 +1966,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
adapter->num_tx_queues : I210_SR_QUEUES_NUM;
for (i = 0; i < max_queue; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
-
- igb_configure_cbs(adapter, i, ring->cbs_enable,
- ring->idleslope, ring->sendslope,
- ring->hicredit, ring->locredit);
+ igb_config_tx_modes(adapter, i);
}
} else {
wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
@@ -2459,6 +2540,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
+{
+ if (!is_fqtss_enabled(adapter)) {
+ enable_fqtss(adapter, true);
+ return;
+ }
+
+ igb_config_tx_modes(adapter, queue);
+
+ if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
+ enable_fqtss(adapter, false);
+}
+
static int igb_offload_cbs(struct igb_adapter *adapter,
struct tc_cbs_qopt_offload *qopt)
{
@@ -2479,17 +2573,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
if (err)
return err;
- if (is_fqtss_enabled(adapter)) {
- igb_configure_cbs(adapter, qopt->queue, qopt->enable,
- qopt->idleslope, qopt->sendslope,
- qopt->hicredit, qopt->locredit);
-
- if (!is_any_cbs_enabled(adapter))
- enable_fqtss(adapter, false);
-
- } else {
- enable_fqtss(adapter, true);
- }
+ igb_offload_apply(adapter, qopt->queue);
return 0;
}
@@ -2698,7 +2782,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
@@ -2728,7 +2812,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
adapter);
@@ -2738,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
}
}
+static int igb_offload_txtime(struct igb_adapter *adapter,
+ struct tc_etf_qopt_offload *qopt)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* Launchtime offloading is only supported by i210 controller. */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ /* Launchtime offloading is only supported by queues 0 and 1. */
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
+ if (err)
+ return err;
+
+ igb_offload_apply(adapter, qopt->queue);
+
+ return 0;
+}
+
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -2748,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
return igb_setup_tc_block(adapter, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return igb_offload_txtime(adapter, type_data);
default:
return -EOPNOTSUPP;
@@ -5568,11 +5677,14 @@ set_itr_now:
}
}
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+ struct igb_tx_buffer *first,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
+ struct timespec64 ts;
context_desc = IGB_TX_CTXTDESC(tx_ring, i);
@@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
mss_l4len_idx |= tx_ring->reg_idx << 4;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ /* We assume there is always a valid tx time available. Invalid times
+ * should have been handled by the upper layers.
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
+ context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->seqnum_seed = 0;
+ }
}
static int igb_tso(struct igb_ring *tx_ring,
@@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
return 1;
}
@@ -5727,7 +5849,7 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
#define IGB_SET_FLAG(_input, _flag, _result) \
@@ -6015,8 +6137,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -6032,6 +6152,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
+ skb_tx_timestamp(skb);
+
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
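Two numeric conversions drive the Qav hunks above: the idleSlope in kbps is translated into TQAVCC credit units with the 61034/1000000 factor from the i210 datasheet, and the skb launch time is reduced to 32 ns units within the current second before being written to seqnum_seed. A sketch with worked values (illustrative helpers, not part of the driver):

static inline u16 idleslope_kbps_to_tqavcc(s32 idleslope_kbps)
{
	/* e.g. 20000 kbps -> DIV_ROUND_UP(20000 * 61034, 1000000) = 1221 */
	return DIV_ROUND_UP_ULL(idleslope_kbps * 61034ULL, 1000000);
}

static inline u32 launchtime_to_seqnum_seed(ktime_t tstamp)
{
	struct timespec64 ts = ns_to_timespec64(tstamp);

	/* 32 ns granularity: tv_nsec = 1600 -> seed = 1600 / 32 = 50 */
	return ts.tv_nsec / 32;
}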
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62e57b05a0ae..5a6600f7b382 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5275,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
struct ixgbe_fwd_adapter *accel)
{
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
struct net_device *vdev = accel->netdev;
int i, baseq, err;
@@ -5286,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
accel->rx_base_queue = baseq;
accel->tx_base_queue = baseq;
+ /* record configuration for macvlan interface in vdev */
+ for (i = 0; i < num_tc; i++)
+ netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+ i, rss_i, baseq + (rss_i * i));
+
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = vdev;
@@ -5310,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
@@ -8197,25 +8208,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
struct ixgbe_adapter *adapter;
- int txq;
-#ifdef IXGBE_FCOE
struct ixgbe_ring_feature *f;
-#endif
+ int txq;
- if (fwd_adapter) {
- adapter = netdev_priv(dev);
- txq = reciprocal_scale(skb_get_hash(skb),
- adapter->num_rx_queues_per_pool);
+ if (sb_dev) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ struct net_device *vdev = sb_dev;
- return txq + fwd_adapter->tx_base_queue;
- }
+ txq = vdev->tc_to_txq[tc].offset;
+ txq += reciprocal_scale(skb_get_hash(skb),
+ vdev->tc_to_txq[tc].count);
-#ifdef IXGBE_FCOE
+ return txq;
+ }
/*
* only execute the code below if protocol is FCoE
@@ -8226,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
/* fall through */
default:
- return fallback(dev, skb);
+ return fallback(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -8242,11 +8253,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= f->indices;
return txq + f->offset;
-#else
- return fallback(dev, skb);
-#endif
}
+#endif
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf)
{
@@ -8766,6 +8775,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
/* if we cannot find a free pool then disable the offload */
netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
macvlan_release_l2fw_offload(vdev);
+
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(adapter->netdev, vdev);
+ netdev_set_sb_channel(vdev, 0);
+
kfree(accel);
return 0;
@@ -9329,7 +9343,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
- adapter, adapter);
+ adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
adapter);
@@ -9769,6 +9783,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!macvlan_supports_dest_filter(vdev))
return ERR_PTR(-EMEDIUMTYPE);
+ /* We need to lock down the macvlan to be a single queue device so that
+ * we can reuse the tc_to_txq field in the macvlan netdev to represent
+ * the queue mapping to our netdev.
+ */
+ if (netif_is_multiqueue(vdev))
+ return ERR_PTR(-ERANGE);
+
pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
if (pool == adapter->num_rx_pools) {
u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9825,6 +9846,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
return ERR_PTR(-ENOMEM);
set_bit(pool, adapter->fwd_bitmask);
+ netdev_set_sb_channel(vdev, pool);
accel->pool = pool;
accel->netdev = vdev;
@@ -9866,6 +9888,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
ring->netdev = NULL;
}
+ /* unbind the queues and drop the subordinate channel config */
+ netdev_unbind_sb_channel(pdev, accel->netdev);
+ netdev_set_sb_channel(accel->netdev, 0);
+
clear_bit(accel->pool, adapter->fwd_bitmask);
kfree(accel);
}
@@ -9966,7 +9992,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
@@ -10026,7 +10051,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
- .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -10049,6 +10073,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
+ .ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
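With the macvlan offload modelled as a subordinate channel in the hunks above, queue selection for an offloaded frame adds a hash-scaled index to the traffic class's queue offset recorded in the macvlan's tc_to_txq table. A sketch of that mapping (hypothetical helper, not from this patch):

static u16 sb_dev_pick_queue(const struct net_device *vdev, u8 tc, u32 hash)
{
	/* e.g. offset 8, count 4, hash 0x9f000000 -> 8 + 2 = 10 */
	return vdev->tc_to_txq[tc].offset +
	       reciprocal_scale(hash, vdev->tc_to_txq[tc].count);
}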
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59416eddd840..d86446d202d5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return ixgbevf_xdp_setup(dev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!(adapter->xdp_prog);
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index afc810069440..7a637b51c7d2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
-{
- /* we are currently only using the first queue */
- return 0;
-}
-
static int
ltq_etop_init(struct net_device *dev)
{
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
.ndo_set_mac_address = ltq_etop_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ltq_etop_set_multicast_list,
- .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_init = ltq_etop_init,
.ndo_tx_timeout = ltq_etop_tx_timeout,
};
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 4d11dd9e3246..51f65a202c6e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index def00dc3eb4e..67b9e81b7c02 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_H_
#define _MVPP2_H_
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -66,15 +64,18 @@
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0)
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY 0x1508
-#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
#define MVPP22_RSS_WIDTH 0x150c
/* Classifier Registers */
@@ -86,11 +87,28 @@
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16)
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0)
+#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL0_OFFS 1
+#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4)
+#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23)
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
+#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
+#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15)
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6)
+#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
@@ -98,6 +116,32 @@
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_HIT_CTR 0x1b50
+#define MVPP22_CLS_C2_ACT 0x1b60
+#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
+#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
+#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
+#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ATTR0 0x1b64
+#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
+#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
+#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24
+#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21)
+#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7
+#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21
+#define MVPP22_CLS_C2_ATTR1 0x1b68
+#define MVPP22_CLS_C2_ATTR2 0x1b6c
+#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
+#define MVPP22_CLS_C2_ATTR3 0x1b70
+
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
@@ -275,6 +319,11 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
+/* Hit counters registers */
+#define MVPP2_CTRS_IDX 0x7040
+#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
+
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
@@ -499,7 +548,7 @@
#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
/* Default number of RXQs in use */
/* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ 4
+#define MVPP2_DEFAULT_RXQ 1
/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX 1024
@@ -553,6 +602,11 @@
((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -703,6 +757,9 @@ struct mvpp2 {
/* Workqueue to gather hardware statistics */
char queue_name[30];
struct workqueue_struct *stats_queue;
+
+ /* Debugfs root entry */
+ struct dentry *dbgfs_dir;
};
struct mvpp2_pcpu_stats {
@@ -795,6 +852,9 @@ struct mvpp2_port {
bool has_tx_irqs;
u32 tx_time_coal;
+
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -831,52 +891,52 @@ struct mvpp2_port {
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+ __le32 command; /* Options used by HW for packet transmitting.*/
u8 packet_offset; /* the offset from the buffer beginning */
u8 phys_txq; /* destination queue ID */
- u16 data_size; /* data size of transmitted packet in bytes */
- u32 buf_dma_addr; /* physical addr of transmitted buffer */
- u32 buf_cookie; /* cookie for access to TX buffer in tx path */
- u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
- u32 reserved2; /* reserved (for future use) */
+ __le16 data_size; /* data size of transmitted packet in bytes */
+ __le32 buf_dma_addr; /* physical addr of transmitted buffer */
+ __le32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ __le32 reserved2; /* reserved (for future use) */
};
/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
- u32 status; /* info about received packet */
- u16 reserved1; /* parser_info (for future use, PnC) */
- u16 data_size; /* size of received packet in bytes */
- u32 buf_dma_addr; /* physical address of the buffer */
- u32 buf_cookie; /* cookie for access to RX buffer in rx path */
- u16 reserved2; /* gem_port_id (for future use, PON) */
- u16 reserved3; /* csum_l4 (for future use, PnC) */
+ __le32 status; /* info about received packet */
+ __le16 reserved1; /* parser_info (for future use, PnC) */
+ __le16 data_size; /* size of received packet in bytes */
+ __le32 buf_dma_addr; /* physical address of the buffer */
+ __le32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ __le16 reserved2; /* gem_port_id (for future use, PON) */
+ __le16 reserved3; /* csum_l4 (for future use, PnC) */
u8 reserved4; /* bm_qset (for future use, BM) */
u8 reserved5;
- u16 reserved6; /* classify_info (for future use, PnC) */
- u32 reserved7; /* flow_id (for future use, PnC) */
- u32 reserved8;
+ __le16 reserved6; /* classify_info (for future use, PnC) */
+ __le32 reserved7; /* flow_id (for future use, PnC) */
+ __le32 reserved8;
};
/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
- u32 command;
+ __le32 command;
u8 packet_offset;
u8 phys_txq;
- u16 data_size;
- u64 reserved1;
- u64 buf_dma_addr_ptp;
- u64 buf_cookie_misc;
+ __le16 data_size;
+ __le64 reserved1;
+ __le64 buf_dma_addr_ptp;
+ __le64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
- u32 status;
- u16 reserved1;
- u16 data_size;
- u32 reserved2;
- u32 reserved3;
- u64 buf_dma_addr_key_hash;
- u64 buf_cookie_misc;
+ __le32 status;
+ __le16 reserved1;
+ __le16 data_size;
+ __le32 reserved2;
+ __le32 reserved3;
+ __le64 buf_dma_addr_key_hash;
+ __le64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
@@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
u32 data);
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8581d5b17dd5..efdb7a656835 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1,17 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RSS and Classifier helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include "mvpp2.h"
#include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
+{ \
+ .flow_type = _type, \
+ .flow_id = _id, \
+ .supported_hash_opts = _opts, \
+ .prs_ri = { \
+ .ri = _ri, \
+ .ri_mask = _ri_mask \
+ } \
+}
+
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+ /* TCP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP4_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, Not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv4 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv4 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* TCP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* TCP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP22_CLS_HEK_IP6_5T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, not fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* UDP over IPv6 flows, fragmented, no vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+ /* UDP over IPv6 flows, fragmented, with vlan tag */
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_IP_MASK),
+
+ /* IPv4 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP22_CLS_HEK_IP4_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv4 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP4_OTHER,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, no vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP22_CLS_HEK_IP6_2T,
+ MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* IPv6 flows, with vlan tag */
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK),
+
+ /* Non IP flow, no vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ 0,
+ MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK),
+ /* Non IP flow, with vlan tag */
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP22_CLS_HEK_OPT_VLAN,
+ 0, 0),
+};
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe)
+{
+ fe->index = index;
+ mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+ fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
@@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+ mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+ return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
struct mvpp2_cls_lookup_entry *le)
@@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+ int field_index)
+{
+ return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+ MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+ fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+ int engine)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+ MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+ bool from_packet)
+{
+ if (from_packet)
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+ else
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+ bool is_last)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+ fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+ flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+ struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_lookup_entry le;
+
+ le.way = 0;
+ le.lkpid = flow->flow_id;
+
+ /* The default RxQ for this port is set in the C2 lookup */
+ le.data = 0;
+
+	/* We point to the first lookup in the sequence for the flow, that is
+ * the C2 lookup.
+ */
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+
+ /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ mvpp2_cls_lookup_write(priv, &le);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+{
+ struct mvpp2_cls_flow_entry fe;
+ int i;
+
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+
+ /* Add all ports */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+
+ /* C3Hx lookups */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
+ mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+ mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
+
+ mvpp2_cls_flow_write(priv, &fe);
+}
+
+/* Adds a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+ u32 field_id)
+{
+ int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ if (nb_fields == MVPP2_FLOW_N_FIELDS)
+ return -EINVAL;
+
+ mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+ mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+ return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+ unsigned long hash_opts)
+{
+ u32 field_id;
+ int i;
+
+ /* Clear old fields */
+ mvpp2_cls_flow_hek_num_set(fe, 0);
+ fe->data[2] = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ field_id = MVPP22_CLS_FIELD_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ field_id = MVPP22_CLS_FIELD_IP4SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ field_id = MVPP22_CLS_FIELD_IP4DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ field_id = MVPP22_CLS_FIELD_IP6SA;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ field_id = MVPP22_CLS_FIELD_IP6DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ field_id = MVPP22_CLS_FIELD_L4SIP;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ field_id = MVPP22_CLS_FIELD_L4DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (mvpp2_flow_add_hek_field(fe, field_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation, or the presence of a
+ * VLAN / DSA Tag.
+ *
+ * Each of these individual flows has different constraints, for example we
+ * can't hash fragmented packets on L4 data (otherwise we would risk packet
+ * re-ordering), so each classification flow masks the options with its
+ * supported ones.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+ u16 requested_opts)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, engine, flow_index;
+ u16 hash_opts;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return -EINVAL;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = flow->supported_hash_opts & requested_opts;
+
+		/* Use the C3HB engine to access L4 info. This adds L4 info to
+		 * the hash parameters.
+		 */
+ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+ engine = MVPP22_CLS_ENGINE_C3HB;
+ else
+ engine = MVPP22_CLS_ENGINE_C3HA;
+
+ if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+ return -EINVAL;
+
+ mvpp2_cls_flow_eng_set(&fe, engine);
+
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ return 0;
+}
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+ u16 hash_opts = 0;
+ int n_fields, i, field;
+
+ n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+ for (i = 0; i < n_fields; i++) {
+ field = mvpp2_cls_flow_hek_get(fe, i);
+
+ switch (field) {
+ case MVPP22_CLS_FIELD_MAC_DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ break;
+ case MVPP22_CLS_FIELD_VLAN:
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ break;
+ case MVPP22_CLS_FIELD_L3_PROTO:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ break;
+ case MVPP22_CLS_FIELD_IP4SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+ break;
+ case MVPP22_CLS_FIELD_IP4DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+ break;
+ case MVPP22_CLS_FIELD_IP6SA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+ break;
+ case MVPP22_CLS_FIELD_IP6DA:
+ hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+ break;
+ case MVPP22_CLS_FIELD_L4SIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ break;
+ case MVPP22_CLS_FIELD_L4DIP:
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ break;
+ default:
+ break;
+ }
+ }
+ return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow, this returns an aggregation of all configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
+ int i, flow_index;
+ u16 hash_opts = 0;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ if (flow->flow_type != flow_type)
+ continue;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+ flow->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+ }
+
+ return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+ struct mvpp2_cls_flow *flow;
+ int i;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ break;
+
+ mvpp2_cls_flow_prs_init(priv, flow);
+ mvpp2_cls_flow_lkp_init(priv, flow);
+ mvpp2_cls_flow_init(priv, flow);
+ }
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ /* Write TCAM */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+
+ memset(&c2, 0, sizeof(c2));
+
+ c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Update RSS status after matching this entry */
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+	/* Configure the default rx queue: update Queue Low and Queue High, but
+	 * don't lock, since the rx queue selection might be overridden by RSS.
+	 */
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
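
A short worked example of the queue encoding used just above, under the assumption that the QHIGH/QLOW masks split the queue number at bit 3 (which the ">> 3" suggests) and that the port's first_rxq is 24; neither value is guaranteed by this hunk:

	first_rxq = 24
	qh = 24 >> 3      = 3
	ql = 24 & 0x7     = 0
	attr[0] = QHIGH(3) | QLOW(0)  ->  default rxq = (3 << 3) | 0 = 24

The debugfs "default_rxq" file added later in this patch reassembles the same value with (qh << 3 | ql).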
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
@@ -61,6 +839,8 @@ void mvpp2_cls_init(struct mvpp2 *priv)
le.way = 1;
mvpp2_cls_lookup_write(priv, &le);
}
+
+ mvpp2_cls_port_init_flows(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port)
/* Update lookup ID table entry */
mvpp2_cls_lookup_write(port->priv, &le);
+
+ mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+ return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+void mvpp22_rss_enable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_enable(port);
+}
+
+void mvpp22_rss_disable(struct mvpp2_port *port)
+{
+ mvpp2_rss_port_c2_disable(port);
}
/* Set CPU queue number for oversize packets */
@@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
-void mvpp22_init_rss(struct mvpp2_port *port)
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+ int nrxqs, cpu, cpus = num_possible_cpus();
+
+ /* Number of RXQs per CPU */
+ nrxqs = port->nrxqs / cpus;
+
+ /* CPU that will handle this rx queue */
+ cpu = rxq / nrxqs;
+
+ if (!cpu_online(cpu))
+ return port->first_rxq;
+
+	/* Indirection to better distribute the packets on the CPUs when
+ * configuring the RSS queues.
+ */
+ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
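
To make the arithmetic above concrete, here is a standalone sketch with assumed values (16 RXQs on the port, 4 possible CPUs, first_rxq = 0); it mirrors mvpp22_rxfh_indir() but is illustrative only and not part of the driver:

#include <stdio.h>

int main(void)
{
	unsigned int total_rxqs = 16, cpus = 4, first_rxq = 0;
	unsigned int per_cpu = total_rxqs / cpus;	/* 4 RXQs per CPU */
	unsigned int rxq;

	for (rxq = 0; rxq < total_rxqs; rxq++)
		/* e.g. entry 5 -> CPU 1, mapped to RXQ (5 * 4 + 5 / 4) % 16 = 5 */
		printf("indir[%u] -> rxq %u (cpu %u)\n", rxq,
		       first_rxq + ((rxq * per_cpu + rxq / cpus) % total_rxqs),
		       rxq / per_cpu);
	return 0;
}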
+
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+ mvpp22_rxfh_indir(port, port->indir[i]));
+ }
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ u16 hash_opts = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ if (info->data & RXH_L4_B_0_1)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+ if (info->data & RXH_L4_B_2_3)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+ /* Fallthrough */
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (info->data & RXH_L2DA)
+ hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+ if (info->data & RXH_VLAN)
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+ if (info->data & RXH_L3_PROTO)
+ hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+ if (info->data & RXH_IP_SRC)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+ MVPP22_CLS_HEK_OPT_IP6SA);
+ if (info->data & RXH_IP_DST)
+ hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+ MVPP22_CLS_HEK_OPT_IP6DA);
+ break;
+ default: return -EOPNOTSUPP;
+ }
+
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+}
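
For reference, the kind of request this handler receives when a user asks for full 5-tuple hashing on TCP over IPv4 looks like the sketch below (illustrative only; ETHTOOL_SRXFH and the RXH_* flags come from the ethtool UAPI):

#include <linux/ethtool.h>

/* Illustration only, not part of the patch. */
static const struct ethtool_rxnfc rxfh_tcp4_5t = {
	.cmd		= ETHTOOL_SRXFH,
	.flow_type	= TCP_V4_FLOW,
	.data		= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
};

mvpp2_ethtool_rxfh_set() expands such a request into the IP4SA/IP4DA, IP6SA/IP6DA and L4SIP/L4DIP HEK options; mvpp2_port_rss_hash_opts_set() then masks the result with each flow's supported_hash_opts, so the TCP over IPv4 flows end up programmed with MVPP22_CLS_HEK_IP4_5T.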
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+ unsigned long hash_opts;
+ int i;
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ info->data = 0;
+
+ for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+ switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ info->data |= RXH_L2DA;
+ break;
+ case MVPP22_CLS_HEK_OPT_VLAN:
+ info->data |= RXH_VLAN;
+ break;
+ case MVPP22_CLS_HEK_OPT_L3_PROTO:
+ info->data |= RXH_L3_PROTO;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ info->data |= RXH_IP_SRC;
+ break;
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ info->data |= RXH_IP_DST;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ info->data |= RXH_L4_B_0_1;
+ break;
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ info->data |= RXH_L4_B_2_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port)
/* Set the table width: replace the whole classifier Rx queue number
* with the ones configured in RSS table entries.
*/
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- /* Loop through the classifier Rx Queues and map them to a RSS table.
- * Map them all to the first table (0) by default.
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
*/
- for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
- mvpp2_write(priv, MVPP22_RSS_TABLE,
- MVPP22_RSS_TABLE_POINTER(0));
- }
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
/* Configure the first table to evenly distribute the packets across
- * real Rx Queues. The table entries map a hash to an port Rx Queue.
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
- for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
- MVPP22_RSS_INDEX_TABLE_ENTRY(i);
- mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
- }
+ mvpp22_rss_fill_table(port, port->id);
+ /* Configure default flows */
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8e1d7f9ffa0b..089f05f29891 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -1,27 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RSS and Classifier definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _MVPP2_CLS_H_
#define _MVPP2_CLS_H_
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
#define MVPP2_CLS_RX_QUEUES 256
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES 32
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS 4
+
+enum mvpp2_cls_engine {
+ MVPP22_CLS_ENGINE_C2 = 1,
+ MVPP22_CLS_ENGINE_C3A,
+ MVPP22_CLS_ENGINE_C3B,
+ MVPP22_CLS_ENGINE_C4,
+ MVPP22_CLS_ENGINE_C3HA = 6,
+ MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+ MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \
+ MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \
+ MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
+ MVPP22_CLS_HEK_L4_OPTS)
+
+enum mvpp2_cls_field_id {
+ MVPP22_CLS_FIELD_MAC_DA = 0x03,
+ MVPP22_CLS_FIELD_VLAN = 0x06,
+ MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+ MVPP22_CLS_FIELD_IP4SA = 0x10,
+ MVPP22_CLS_FIELD_IP4DA = 0x11,
+ MVPP22_CLS_FIELD_IP6SA = 0x17,
+ MVPP22_CLS_FIELD_IP6DA = 0x1a,
+ MVPP22_CLS_FIELD_L4SIP = 0x1d,
+ MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+ MVPP22_C2_NO_UPD = 0,
+ MVPP22_C2_NO_UPD_LOCK,
+ MVPP22_C2_UPD,
+ MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+ MVPP22_C2_FWD_NO_UPD = 0,
+ MVPP22_C2_FWD_NO_UPD_LOCK,
+ MVPP22_C2_FWD_SW,
+ MVPP22_C2_FWD_SW_LOCK,
+ MVPP22_C2_FWD_HW,
+ MVPP22_C2_FWD_HW_LOCK,
+ MVPP22_C2_FWD_HW_LOW_LAT,
+ MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_ATTR_WORDS 5
+
+struct mvpp2_cls_c2_entry {
+ u32 index;
+ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ u32 act;
+ u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+};
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
+ */
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+ MVPP2_FL_START = 8,
+ MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+ MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_FL_IP4_TAG,
+ MVPP2_FL_IP6_UNTAG,
+ MVPP2_FL_IP6_TAG,
+ MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_FL_NON_IP_TAG,
+ MVPP2_FL_LAST,
+};
+
+struct mvpp2_cls_flow {
+ /* The L2-L4 traffic flow type */
+ int flow_type;
+
+ /* The first id in the flow table for this flow */
+ u16 flow_id;
+
+ /* The supported HEK fields for this flow */
+ u16 supported_hash_opts;
+
+ /* The Header Parser result_info that matches this flow */
+ struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_N_FLOWS 52
+
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
+#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
+ (port) + 1)
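
A quick worked example of the indexing scheme above, assuming MVPP2_MAX_PORTS is 4 (its definition lives in mvpp2.h and is not shown in this hunk), so MVPP2_ENTRIES_PER_FLOW is 5:

	MVPP2_FLOW_C2_ENTRY(MVPP2_FL_START)           = 8 * 5     = 40
	MVPP2_PORT_FLOW_HASH_ENTRY(0, MVPP2_FL_START) = 8 * 5 + 1 = 41
	MVPP2_PORT_FLOW_HASH_ENTRY(3, MVPP2_FL_START) = 8 * 5 + 4 = 44

This matches mvpp2_cls_flow_init(), which programs one C2 entry followed by one C3Hx entry per port for every flow.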
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-void mvpp22_init_rss(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+
+void mvpp22_rss_port_init(struct mvpp2_port *port);
+
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
void mvpp2_cls_init(struct mvpp2 *priv);
@@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port);
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+ struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2);
+
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644
index 000000000000..f9744a61e5dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
+
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+ u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+ const char *flow_name;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ switch (f->flow_type) {
+ case IPV4_FLOW:
+ flow_name = "ipv4";
+ break;
+ case IPV6_FLOW:
+ flow_name = "ipv6";
+ break;
+ case TCP_V4_FLOW:
+ flow_name = "tcp4";
+ break;
+ case TCP_V6_FLOW:
+ flow_name = "tcp6";
+ break;
+ case UDP_V4_FLOW:
+ flow_name = "udp4";
+ break;
+ case UDP_V6_FLOW:
+ flow_name = "udp6";
+ break;
+ default:
+ flow_name = "other";
+ }
+
+ seq_printf(s, "%s\n", flow_name);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+ .open = mvpp2_dbgfs_flow_type_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_flow_type_release,
+};
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
+
+ f = mvpp2_cls_flow_get(entry->flow);
+ if (!f)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", f->flow_id);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index;
+ u16 hash_opts;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+ seq_printf(s, "0x%04x\n", hash_opts);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
+
+ kfree(flow_entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+ .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_port_flow_hash_opt_release,
+};
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+ struct mvpp2_port *port = entry->port;
+ struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *f;
+ int flow_index, engine;
+
+ f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+ if (!f)
+ return -EINVAL;
+
+ flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+ mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+ engine = mvpp2_cls_flow_eng_get(&fe);
+
+ seq_printf(s, "%d\n", engine);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ u32 hits;
+
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
+
+ seq_printf(s, "%u\n", hits);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+ ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ seq_printf(s, "%d\n", (qh << 3 | ql));
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2_cls_c2_entry c2;
+ int enabled;
+
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+ enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+ seq_printf(s, "%d\n", enabled);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ unsigned char byte[2], enable[2];
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ u16 rvid;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ if (!priv->prs_shadow[tid].valid)
+ continue;
+
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+ seq_printf(s, "%u\n", rvid);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+ seq_printf(s, "%03d\n", i);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_port *port = s->private;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ unsigned long pmap;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+ priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+ /* We only want entries active on this port */
+ if (!test_bit(port->id, &pmap))
+ continue;
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+
+ seq_printf(s, "%pM\n", da);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+
+ seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned int pmap;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
+ pmap &= MVPP2_PRS_PORT_MASK;
+
+ seq_printf(s, "%02x\n", pmap);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char ai, ai_mask;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+ ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+ seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+ unsigned char data[8], mask[8];
+ int i;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ for (i = 0; i < 8; i++)
+ mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+ seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+ seq_printf(s, "%*phN\n", 14, pe.sram);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ int val;
+
+ val = mvpp2_prs_hits(entry->priv, entry->tid);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\n", val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+ struct mvpp2_dbgfs_prs_entry *entry = s->private;
+ struct mvpp2 *priv = entry->priv;
+ int tid = entry->tid;
+
+ seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct mvpp2_dbgfs_prs_entry *entry = seq->private;
+
+ kfree(entry);
+ return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+ .open = mvpp2_dbgfs_prs_valid_open,
+ .read = seq_read,
+ .release = mvpp2_dbgfs_prs_valid_release,
+};
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+ struct mvpp2_port *port,
+ struct mvpp2_dbgfs_flow_entry *entry)
+{
+ struct mvpp2_dbgfs_port_flow_entry *port_entry;
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ /* This will be freed by 'hash_opts' release op */
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
+
+ port_entry->port = port;
+ port_entry->dbg_fe = entry;
+
+ debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+ debugfs_create_file("engine", 0444, port_dir, port_entry,
+ &mvpp2_dbgfs_port_flow_engine_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_dbgfs_flow_entry *entry;
+ struct dentry *flow_entry_dir;
+ char flow_entry_name[10];
+ int i, ret;
+
+ sprintf(flow_entry_name, "%02d", flow);
+
+ flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (!flow_entry_dir)
+ return -ENOMEM;
+
+ /* This will be freed by 'type' release op */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->flow = flow;
+ entry->priv = priv;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_dec_hits_fops);
+
+ debugfs_create_file("type", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_type_fops);
+
+ debugfs_create_file("id", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_id_fops);
+
+ /* Create entry for each port */
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+ priv->port_list[i], entry);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *flow_dir;
+ int i, ret;
+
+ flow_dir = debugfs_create_dir("flows", parent);
+ if (!flow_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int tid)
+{
+ struct mvpp2_dbgfs_prs_entry *entry;
+ struct dentry *prs_entry_dir;
+ char prs_entry_name[10];
+
+ if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
+ sprintf(prs_entry_name, "%03d", tid);
+
+ prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+ if (!prs_entry_dir)
+ return -ENOMEM;
+
+	/* The 'valid' entry's release op will free this entry */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->tid = tid;
+ entry->priv = priv;
+
+ /* Create each attr */
+ debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_sram_fops);
+
+ debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_valid_fops);
+
+ debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_lu_fops);
+
+ debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_ai_fops);
+
+ debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hdata_fops);
+
+ debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+ &mvpp2_dbgfs_prs_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *prs_dir;
+ int i, ret;
+
+ prs_dir = debugfs_create_dir("parser", parent);
+ if (!prs_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+ struct mvpp2_port *port)
+{
+ struct dentry *port_dir;
+
+ port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
+
+ debugfs_create_file("parser_entries", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_parser_fops);
+
+ debugfs_create_file("mac_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_filter_fops);
+
+ debugfs_create_file("vid_filter", 0444, port_dir, port,
+ &mvpp2_dbgfs_port_vid_fops);
+
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+ struct dentry *mvpp2_dir, *mvpp2_root;
+ int ret, i;
+
+ mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
+ if (!mvpp2_root) {
+ mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
+
+ mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
+
+ priv->dbgfs_dir = mvpp2_dir;
+
+ ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->port_count; i++) {
+ ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
+ return;
+err:
+ mvpp2_dbgfs_cleanup(priv);
+}
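
Putting the debugfs helpers together, the resulting layout looks roughly as follows (a sketch derived from the debugfs_create_* calls above; the root directory name comes from MVPP2_DRIVER_NAME and the number of parser entries from MVPP2_PRS_TCAM_SRAM_SIZE, neither of which is shown in this file):

	/sys/kernel/debug/<driver>/<controller>/
	    parser/<tid>/            valid, lookup_id, ai, header_data, sram, hits
	    flows/<00..51>/          flow_hits, dec_hits, type, id
	    flows/<00..51>/<netdev>/ hash_opts, engine
	    <netdev>/                parser_entries, mac_filter, vid_filter,
	                             c2_hits, default_rxq, rss_enable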
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0319ed9ef8b8..32d785b616e1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Marvell PPv2 network controller for Armada 375 SoC.
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/acpi.h>
@@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
@@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
- return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+ MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
- u64 val = (u64)addr;
+ __le64 val = cpu_to_le64(addr);
- tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
if (port->priv->hw_version == MVPP21)
- return tx_desc->pp21.data_size;
+ return le16_to_cpu(tx_desc->pp21.data_size);
else
- return tx_desc->pp22.data_size;
+ return le16_to_cpu(tx_desc->pp22.data_size);
}
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
@@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
size_t size)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.data_size = size;
+ tx_desc->pp21.data_size = cpu_to_le16(size);
else
- tx_desc->pp22.data_size = size;
+ tx_desc->pp22.data_size = cpu_to_le16(size);
}
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
@@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
unsigned int command)
{
if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.command = command;
+ tx_desc->pp21.command = cpu_to_le32(command);
else
- tx_desc->pp22.command = command;
+ tx_desc->pp22.command = cpu_to_le32(command);
}
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
@@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_dma_addr;
+ return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
- return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+ MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.buf_cookie;
+ return le32_to_cpu(rx_desc->pp21.buf_cookie);
else
- return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+ return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+ MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.data_size;
+ return le16_to_cpu(rx_desc->pp21.data_size);
else
- return rx_desc->pp22.data_size;
+ return le16_to_cpu(rx_desc->pp22.data_size);
}
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
if (port->priv->hw_version == MVPP21)
- return rx_desc->pp21.status;
+ return le32_to_cpu(rx_desc->pp21.status);
else
- return rx_desc->pp22.status;
+ return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
command |= MVPP2_TXD_IP_CSUM_DISABLE;
- if (l3_proto == swab16(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
} else {
@@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
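+/* RSS spreads flows across multiple per-port RX queues, so it is only
+ * available when the driver runs in multi-queue mode.
+ */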
+static bool mvpp22_rss_is_supported(void)
+{
+ return queue_mode == MVPP2_QDIST_MULTI_MODE;
+}
+
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev)
mvpp2_start_dev(port);
- if (priv->hw_version == MVPP22)
- mvpp22_init_rss(port);
-
/* Start hardware statistics gathering */
queue_delayed_work(priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
@@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev,
}
}
+ if (changed & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ mvpp22_rss_enable(port);
+ else
+ mvpp22_rss_disable(port);
+ }
+
return 0;
}
@@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = mvpp2_ethtool_rxfh_get(port, info);
+ break;
+ case ETHTOOL_GRXRINGS:
+ info->data = port->nrxqs;
+ break;
+ default:
+		return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mvpp2_ethtool_rxfh_set(port, info);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported())
+ return -EOPNOTSUPP;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ return -EOPNOTSUPP;
+
+ if (key)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ mvpp22_rss_fill_table(port, port->id);
+ }
+
+ return 0;
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rxnfc = mvpp2_ethtool_get_rxnfc,
+ .set_rxnfc = mvpp2_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
+ .get_rxfh = mvpp2_ethtool_get_rxfh,
+ .set_rxfh = mvpp2_ethtool_set_rxfh,
+
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
MVPP2_MAX_PORTS * priv->max_port_rxqs)
return -EINVAL;
- if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
- (port->ntxqs > MVPP2_MAX_TXQ))
+ if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
+ port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
return -EINVAL;
/* Disable port */
@@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
+ if (mvpp22_rss_is_supported())
+ mvpp22_rss_port_init(port);
+
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (mvpp22_rss_is_supported())
+ dev->hw_features |= NETIF_F_RXHASH;
+
if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev)
(unsigned long)of_device_get_match_data(&pdev->dev);
}
+	/* Multi-queue mode isn't supported on PPv2.1, fall back to single
+	 * mode
+	 */
+ if (priv->hw_version == MVPP21)
+ queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
@@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ mvpp2_dbgfs_init(priv, pdev->name);
+
platform_set_drvdata(pdev, priv);
return 0;
@@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev)
struct fwnode_handle *port_fwnode;
int i = 0;
+ mvpp2_dbgfs_cleanup(priv);
+
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 6bb69f086794..392fd895f278 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Header Parser helpers for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
return -EINVAL;
/* Clear entry invalidation bit */
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+ mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
return 0;
}
/* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
- struct mvpp2_prs_entry *pe, int tid)
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid)
{
int i;
@@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
- pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
- if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
return MVPP2_PRS_TCAM_ENTRY_INVALID;
for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
- pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+ pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
- pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+ pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
return 0;
}
@@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
- pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
unsigned int port, bool add)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
if (add)
- pe->tcam.byte[enable_off] &= ~(1 << port);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
else
- pe->tcam.byte[enable_off] |= 1 << port;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}
/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
unsigned int ports)
{
- unsigned char port_mask = MVPP2_PRS_PORT_MASK;
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
- pe->tcam.byte[enable_off] &= ~port_mask;
- pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+ pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
/* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
- int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
- return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+ return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
/* Set byte of data and its enable bits in tcam sw entry */
@@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
unsigned int offs, unsigned char byte,
unsigned char enable)
{
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
- pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+ pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
/* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
- unsigned int offs, unsigned char *byte,
- unsigned char *enable)
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable)
{
- *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
- *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+ int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+ *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+ *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
u16 data)
{
- int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
- if (tcam_data != data)
- return false;
- return true;
+ tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+ return tcam_data == data;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int enable)
{
- int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+ int i;
for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
if (!(enable & BIT(i)))
continue;
if (bits & BIT(i))
- pe->tcam.byte[ai_idx] |= 1 << i;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
else
- pe->tcam.byte[ai_idx] &= ~(1 << i);
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
}
- pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+ pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}
/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
- return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+ return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}
/* Set ethertype in tcam sw entry */
@@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
- int val)
+ u32 val)
{
- pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+ pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Update ri bits in sram sw entry */
@@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
unsigned int i;
for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
- int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_RI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
}
@@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
- return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+ return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
@@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
unsigned int bits, unsigned int mask)
{
unsigned int i;
- int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
if (!(mask & BIT(i)))
continue;
if (bits & BIT(i))
- mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
else
- mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+ mvpp2_prs_sram_bits_clear(pe,
+ MVPP2_PRS_SRAM_AI_OFFS + i,
+ 1);
mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
}
@@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
u8 bits;
- int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
- int ai_en_off = ai_off + 1;
- int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+	/* The AI field lives in bits 90..97, so it spans two u32 words */
+ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
- bits = (pe->sram.byte[ai_off] >> ai_shift) |
- (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+ bits = (pe->sram[ai_off] >> ai_shift) |
+ (pe->sram[ai_off + 1] << (32 - ai_shift));
return bits;
}
@@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
}
/* Set value */
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
- (unsigned char)shift;
+ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
/* Reset and set operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set value */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
MVPP2_PRS_SRAM_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
- MVPP2_PRS_SRAM_UDF_BITS)] |=
- (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ offset & MVPP2_PRS_SRAM_UDF_MASK);
/* Set offset type */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
@@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
/* Set offset operation */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
- ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
- (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
- pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
- MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
- (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
/* Set base offset as current */
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
@@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
continue;
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
if (!match)
continue;
@@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
- mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
if (!match)
continue;
@@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
pe.index = tid;
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
@@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
pe.index = tid;
- /* Clear tcam data before updating */
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
- pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
MVPP2_PRS_IPV4_HEAD,
MVPP2_PRS_IPV4_HEAD_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
MVPP2_PRS_IPV4_IHL_MASK);
/* Clear ri before updating */
- pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
@@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
return 0;
}
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ u8 *ri_byte, *ri_byte_mask;
+ int tid, i;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_tcam_first_free(priv,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+
+ ri_byte = (u8 *)&ri;
+ ri_byte_mask = (u8 *)&ri_mask;
+
+ mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
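+	/* Match the 32-bit result info (ri) value byte by byte in the TCAM data */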
+ for (i = 0; i < 4; i++) {
+ mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+ ri_byte_mask[i]);
+ }
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
@@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
return 0;
}
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+ u32 val;
+
+	if (index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ return -EINVAL;
+
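+	/* Hit counters are read indirectly: select the TCAM entry index first */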
+ mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+ val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+ val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+ return val;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 22fbbc4c8b28..e22f6c85d380 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header Parser definitions for Marvell PPv2 Network Controller
*
* Copyright (C) 2014 Marvell
*
* Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include "mvpp2.h"
-#ifndef _MVPP2_PRS_H_
-#define _MVPP2_PRS_H_
-
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
@@ -50,17 +48,25 @@
* The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
*/
#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
- (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
- (((offs) * 2) - ((offs) % 2) + 2)
-#define MVPP2_PRS_TCAM_AI_BYTE 16
-#define MVPP2_PRS_TCAM_PORT_BYTE 17
-#define MVPP2_PRS_TCAM_LU_BYTE 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD 5
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2)
+
+#define MVPP2_PRS_TCAM_EN(data) ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD 4
+#define MVPP2_PRS_TCAM_AI(ai) (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD 4
+#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD 5
+#define MVPP2_PRS_TCAM_LU(lu) (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD 5
#define MVPP2_PRS_VID_TCAM_BYTE 2
@@ -146,6 +152,7 @@
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
@@ -214,6 +221,10 @@
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \
+ MVPP2_PRS_RI_IP_FRAG_MASK | \
+ MVPP2_PRS_RI_L4_PROTO_MASK)
+
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
@@ -255,20 +266,15 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_LAST,
};
-union mvpp2_prs_tcam_entry {
- u32 word[MVPP2_PRS_TCAM_WORDS];
- u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
- u32 word[MVPP2_PRS_SRAM_WORDS];
- u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
struct mvpp2_prs_entry {
u32 index;
- union mvpp2_prs_tcam_entry tcam;
- union mvpp2_prs_sram_entry sram;
+ u32 tcam[MVPP2_PRS_TCAM_WORDS];
+ u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
};
struct mvpp2_prs_shadow {
@@ -288,10 +294,21 @@ struct mvpp2_prs_shadow {
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+ int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+ unsigned int offs, unsigned char *byte,
+ unsigned char *enable);
+
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
int mvpp2_prs_def_flow(struct mvpp2_port *port);
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
@@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 16b10d01fcf4..3f400770fcd8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
- srq.o resource_tracker.o
+ srq.o resource_tracker.o crdump.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index e2b6b0cac1ac..c81d15bf259c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
dev = persist->dev;
mlx4_err(dev, "device is going to be reset\n");
- if (mlx4_is_slave(dev))
+ if (mlx4_is_slave(dev)) {
err = mlx4_reset_slave(dev);
- else
+ } else {
+ mlx4_crdump_collect(dev);
err = mlx4_reset_master(dev);
+ }
if (!err) {
mlx4_err(dev, "device was reset successfully\n");
@@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
- err = mlx4_restart_one(persist->pdev);
+ err = mlx4_restart_one(persist->pdev, false, NULL);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
new file mode 100644
index 000000000000..88316c743820
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+#define BAD_ACCESS 0xBADACCE5
+#define HEALTH_BUFFER_SIZE 0x40
+#define CR_ENABLE_BIT swab32(BIT(6))
+#define CR_ENABLE_BIT_OFFSET 0xF3F04
+#define MAX_NUM_OF_DUMPS_TO_STORE (8)
+
+static const char *region_cr_space_str = "cr-space";
+static const char *region_fw_health_str = "fw-health";
+
+/* Set to true if the CR enable bit was set before the crdump */
+static bool crdump_enable_bit_set;
+
+static void crdump_enable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Get current enable bit value */
+	crdump_enable_bit_set =
+ readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
+
+ /* Enable FW CR filter (set bit6 to 0) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+
+ /* Enable block volatile crspace accesses */
+ writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+}
+
+static void crdump_disable_crspace_access(struct mlx4_dev *dev,
+ u8 __iomem *cr_space)
+{
+ /* Disable block volatile crspace accesses */
+ writel(0, cr_space + dev->caps.health_buffer_addrs +
+ HEALTH_BUFFER_SIZE);
+
+ /* Restore FW CR filter value (set bit6 to original value) */
+	if (crdump_enable_bit_set)
+ writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
+ cr_space + CR_ENABLE_BIT_OFFSET);
+}
+
+static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 *crspace_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_crspace) {
+ mlx4_err(dev, "crdump: cr-space region is NULL\n");
+ return;
+ }
+
+ /* Try to collect CR space */
+ cr_res_size = pci_resource_len(pdev, 0);
+ crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
+ if (crspace_data) {
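+		/* Copy the whole CR space (BAR 0) into the snapshot buffer,
+		 * one 32-bit word at a time.
+		 */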
+ for (offset = 0; offset < cr_res_size; offset += 4)
+ *(u32 *)(crspace_data + offset) =
+ readl(cr_space + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_crspace,
+ cr_res_size, crspace_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(crspace_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_cr_space_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_cr_space_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
+ }
+}
+
+static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
+ u8 __iomem *cr_space,
+ u32 id)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ u8 *health_data;
+ int offset;
+ int err;
+
+ if (!crdump->region_fw_health) {
+ mlx4_err(dev, "crdump: fw-health region is NULL\n");
+ return;
+ }
+
+ /* Try to collect health buffer */
+ health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
+ if (health_data) {
+ u8 __iomem *health_buf_start =
+ cr_space + dev->caps.health_buffer_addrs;
+
+ for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
+ *(u32 *)(health_data + offset) =
+ readl(health_buf_start + offset);
+
+ err = devlink_region_snapshot_create(crdump->region_fw_health,
+ HEALTH_BUFFER_SIZE,
+ health_data,
+ id, &kvfree);
+ if (err) {
+ kvfree(health_data);
+ mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+ region_fw_health_str, id, err);
+ } else {
+ mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+ id, region_fw_health_str);
+ }
+ } else {
+ mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
+ }
+}
+
+int mlx4_crdump_collect(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+ unsigned long cr_res_size;
+ u8 __iomem *cr_space;
+ u32 id;
+
+ if (!dev->caps.health_buffer_addrs) {
+ mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
+ return 0;
+ }
+
+ if (!crdump->snapshot_enable) {
+ mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
+ return 0;
+ }
+
+ cr_res_size = pci_resource_len(pdev, 0);
+
+ cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
+ if (!cr_space) {
+ mlx4_err(dev, "crdump: Failed to map pci cr region\n");
+ return -ENODEV;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
+
+ /* Get the available snapshot ID for the dumps */
+ id = devlink_region_shapshot_id_get(devlink);
+
+ /* Try to capture dumps */
+ mlx4_crdump_collect_crspace(dev, cr_space, id);
+ mlx4_crdump_collect_fw_health(dev, cr_space, id);
+
+ crdump_disable_crspace_access(dev, cr_space);
+
+ iounmap(cr_space);
+ return 0;
+}
+
+int mlx4_crdump_init(struct mlx4_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ crdump->snapshot_enable = false;
+
+ /* Create cr-space region */
+ crdump->region_crspace =
+ devlink_region_create(devlink,
+ region_cr_space_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
+ if (IS_ERR(crdump->region_crspace))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_cr_space_str,
+ PTR_ERR(crdump->region_crspace));
+
+ /* Create fw-health region */
+ crdump->region_fw_health =
+ devlink_region_create(devlink,
+ region_fw_health_str,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
+ if (IS_ERR(crdump->region_fw_health))
+ mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+ region_fw_health_str,
+ PTR_ERR(crdump->region_fw_health));
+
+ return 0;
+}
+
+void mlx4_crdump_end(struct mlx4_dev *dev)
+{
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+
+ devlink_region_destroy(crdump->region_fw_health);
+ devlink_region_destroy(crdump->region_crspace);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 65eb06e017e4..6785661d1a72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx4_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx4_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786308af..1857ee0f0871 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb);
+ return fallback(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return fallback(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 46dcbfbe4c5e..babcfd9c0571 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
-
+#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->rl_caps.min_unit = size >> 14;
}
+ MLX4_GET(dev_cap->health_buffer_addrs, outbox,
+ QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
+
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cd6399c76bfd..650ae08c71de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -128,6 +128,7 @@ struct mlx4_dev_cap {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
bool wol_port[MLX4_MAX_PORTS + 1];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 872014702fc1..2d979a652b7b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -177,6 +177,131 @@ struct mlx4_port_config {
static atomic_t pf_loading = ATOMIC_INIT(0);
+static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ ctx->val.vbool = !!mlx4_internal_err_reset;
+ return 0;
+}
+
+static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ mlx4_internal_err_reset = ctx->val.vbool;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ ctx->val.vbool = dev->persist->crdump.snapshot_enable;
+ return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+
+ dev->persist->crdump.snapshot_enable = ctx->val.vbool;
+ return 0;
+}
+
+static int
+mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u32 value = val.vu32;
+
+ if (value < 1 || value > 128)
+ return -ERANGE;
+
+ if (!is_power_of_2(value)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum mlx4_devlink_param_id {
+ MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+};
+
+static const struct devlink_param mlx4_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_ierr_reset_get,
+ mlx4_devlink_ierr_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx4_devlink_max_macs_validate),
+ DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ mlx4_devlink_crdump_snapshot_get,
+ mlx4_devlink_crdump_snapshot_set, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ int err;
+
+ err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
+ if (err)
+ mlx4_warn(dev,
+ "devlink set parameter %u value failed (err = %d)",
+ param_id, err);
+}
+
+static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = !!mlx4_internal_err_reset;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
+
+ value.vu32 = 1UL << log_num_mac;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+
+ value.vbool = enable_64b_cqe_eqe;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
+
+ value.vbool = enable_4k_uar;
+ mlx4_devlink_set_init_value(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
+
+ value.vbool = false;
+ mlx4_devlink_set_init_value(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
+}
+
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
@@ -428,6 +553,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
+ dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
@@ -3711,10 +3837,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
}
}
- err = mlx4_catas_init(&priv->dev);
+ err = mlx4_crdump_init(&priv->dev);
if (err)
goto err_release_regions;
+ err = mlx4_catas_init(&priv->dev);
+ if (err)
+ goto err_crdump;
+
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
if (err)
goto err_catas;
@@ -3724,6 +3854,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
err_catas:
mlx4_catas_end(&priv->dev);
+err_crdump:
+ mlx4_crdump_end(&priv->dev);
+
err_release_regions:
pci_release_regions(pdev);
@@ -3757,8 +3890,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
return __set_port_type(info, mlx4_port_type);
}
+static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
+ if (!err && mlx4_internal_err_reset != saved_value.vbool) {
+ mlx4_internal_err_reset = saved_value.vbool;
+ /* Notify on value changed on runtime configuration mode */
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ }
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ log_num_mac = order_base_2(saved_value.vu32);
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
+ if (!err)
+ enable_64b_cqe_eqe = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
+ if (!err)
+ enable_4k_uar = saved_value.vbool;
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
+ if (!err && crdump->snapshot_enable != saved_value.vbool) {
+ crdump->snapshot_enable = saved_value.vbool;
+ devlink_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ }
+}
+
+static int mlx4_devlink_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_dev_persistent *persist = dev->persist;
+ int err;
+
+ if (persist->num_vfs)
+ mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
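+	/* With reload set, mlx4_restart_one re-reads the devlink driverinit
+	 * parameter values before loading the device again.
+	 */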
+ err = mlx4_restart_one(persist->pdev, true, devlink);
+ if (err)
+ mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+
+ return err;
+}
+
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload = mlx4_devlink_reload,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3792,14 +3985,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ret = devlink_register(devlink, &pdev->dev);
if (ret)
goto err_persist_free;
-
- ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ ret = devlink_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
+ mlx4_devlink_set_params_init_values(devlink);
+ ret = __mlx4_init_one(pdev, id->driver_data, priv);
+ if (ret)
+ goto err_params_unregister;
pci_save_state(pdev);
return 0;
+err_params_unregister:
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
devlink_unregister(devlink);
err_persist_free:
@@ -3929,6 +4129,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
+ mlx4_crdump_end(dev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
@@ -3936,6 +4137,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
+ devlink_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
@@ -3960,7 +4163,7 @@ static int restore_current_port_types(struct mlx4_dev *dev,
return err;
}
-int mlx4_restart_one(struct pci_dev *pdev)
+int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -3973,6 +4176,8 @@ int mlx4_restart_one(struct pci_dev *pdev)
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mlx4_unload_one(pdev);
+ if (reload)
+ mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cb9e923e8399..6e016092a6f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1042,7 +1042,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_crdump_init(struct mlx4_dev *dev);
+void mlx4_crdump_end(struct mlx4_dev *dev);
+int mlx4_restart_one(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
@@ -1227,6 +1230,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
int mlx4_comm_internal_err(u32 slave_read);
+int mlx4_crdump_collect(struct mlx4_dev *dev);
+
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545f82e6..c3228b89df46 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9efbf193ad5a..d923f2f58608 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
- en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \
+ vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
new file mode 100644
index 000000000000..c13260467750
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
@@ -0,0 +1,37 @@
+#ifndef __MLX5E_ACCEL_H__
+#define __MLX5E_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
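+/* A valid metadata header means the frame is long enough to hold it and
+ * carries the metadata ethertype right after the MAC addresses.
+ */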
+static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
+{
+ __be16 *ethtype;
+
+ if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
+ return false;
+ ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+ if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ return false;
+ return true;
+}
+
+static inline void remove_metadata_hdr(struct sk_buff *skb)
+{
+ struct ethhdr *old_eth;
+ struct ethhdr *new_eth;
+
+ /* Remove the metadata from the buffer */
+ old_eth = (struct ethhdr *)skb->data;
+ new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+ memmove(new_eth, old_eth, 2 * ETH_ALEN);
+ /* Ethertype is already in its new place */
+ skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
index 77ac19f38cbe..da7bd26368f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -37,17 +37,26 @@
#include "mlx5_core.h"
#include "fpga/tls.h"
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
- return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid);
+ return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, p_swid,
+ direction_sx);
}
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx)
{
- mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+ mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
+}
+
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
}
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index 6f9c9f446ecc..def4093ebfae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
u8 reserved_at_2[0x1e];
};
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else
static inline int
-mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
-static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx) { return -ENOTSUPP; }
+static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ bool direction_sx) { }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
+ u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index eb9eb7aa953a..e1b237ccdf56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -137,7 +137,6 @@ struct page_pool;
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
-#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
@@ -866,7 +865,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback);
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index f20074dbef32..39a5d13ba459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -34,12 +34,11 @@
#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__
-#ifdef CONFIG_MLX5_ACCEL
-
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
+#include "en_accel/rxtx.h"
#include "en.h"
static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
@@ -64,9 +63,13 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
}
#endif
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi);
+ if (unlikely(!skb))
+ return NULL;
+ }
+
return skb;
}
-#endif /* CONFIG_MLX5_ACCEL */
-
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index c245d8e78509..128a82b1dbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
+#include "accel/accel.h"
#include "en.h"
enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
}
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 *cqe_bcnt)
{
struct mlx5e_ipsec_metadata *mdata;
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
struct xfrm_state *xs;
- __be16 *ethtype;
- /* Detect inline metadata */
- if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
- return skb;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+ if (!is_metadata_hdr_valid(skb))
return skb;
/* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return NULL;
}
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
return skb;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2bfbbef1b054..ca47c0540904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -41,7 +41,7 @@
#include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
- struct sk_buff *skb);
+ struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
new file mode 100644
index 000000000000..7b7ec3998e84
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
@@ -0,0 +1,109 @@
+#include "en_accel/rxtx.h"
+
+static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb,
+ struct sk_buff *nskb,
+ int remaining)
+{
+ int bytes_needed = remaining, remaining_headlen, remaining_page_offset;
+ int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
+ int payload_len = remaining + sizeof(struct udphdr);
+ int k = 0, i, j;
+
+ skb_copy_bits(skb, 0, nskb->data, headlen);
+ nskb->dev = skb->dev;
+ skb_reset_mac_header(nskb);
+ skb_set_network_header(nskb, skb_network_offset(skb));
+ skb_set_transport_header(nskb, skb_transport_offset(skb));
+ skb_set_tail_pointer(nskb, headlen);
+
+ /* How many frags do we need? */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ k++;
+ if (bytes_needed <= 0)
+ break;
+ }
+
+ /* Fill the first frag and split it if necessary */
+ j = skb_shinfo(skb)->nr_frags - k;
+ remaining_page_offset = -bytes_needed;
+ skb_fill_page_desc(nskb, 0,
+ skb_shinfo(skb)->frags[j].page.p,
+ skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset,
+ skb_shinfo(skb)->frags[j].size - remaining_page_offset);
+
+ skb_frag_ref(skb, j);
+
+ /* Fill the rest of the frags */
+ for (i = 1; i < k; i++) {
+ j = skb_shinfo(skb)->nr_frags - k + i;
+
+ skb_fill_page_desc(nskb, i,
+ skb_shinfo(skb)->frags[j].page.p,
+ skb_shinfo(skb)->frags[j].page_offset,
+ skb_shinfo(skb)->frags[j].size);
+ skb_frag_ref(skb, j);
+ }
+ skb_shinfo(nskb)->nr_frags = k;
+
+ remaining_headlen = remaining - skb->data_len;
+
+ /* headlen contains remaining data? */
+ if (remaining_headlen > 0)
+ skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen,
+ remaining_headlen);
+ nskb->len = remaining + headlen;
+ nskb->data_len = payload_len - sizeof(struct udphdr) +
+ max_t(int, 0, remaining_headlen);
+ nskb->protocol = skb->protocol;
+ if (nskb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) +
+ skb_shinfo(skb)->gso_segs);
+ ip_hdr(nskb)->tot_len =
+ htons(payload_len + sizeof(struct iphdr));
+ } else {
+ ipv6_hdr(nskb)->payload_len = htons(payload_len);
+ }
+ udp_hdr(nskb)->len = htons(payload_len);
+ skb_shinfo(nskb)->gso_size = 0;
+ nskb->ip_summed = skb->ip_summed;
+ nskb->csum_start = skb->csum_start;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->queue_mapping = skb->queue_mapping;
+}
+
+/* might send skbs and update wqe and pi */
+struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
+{
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
+ int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size;
+ struct sk_buff *nskb;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr));
+ else
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ udp_hdr(skb)->len = htons(payload_len);
+ if (!remaining)
+ return skb;
+
+ sq->stats->udp_seg_rem++;
+ nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ sq->stats->dropped++;
+ return NULL;
+ }
+
+ mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining);
+
+ skb_shinfo(skb)->gso_segs--;
+ pskb_trim(skb, skb->len - remaining);
+ mlx5e_sq_xmit(sq, skb, *wqe, *pi);
+ mlx5e_sq_fetch_wqe(sq, wqe, pi);
+ return nskb;
+}
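
The interesting arithmetic in mlx5e_udp_gso_handle_tx_skb() above is the remainder computation: the hardware segments the payload at gso_size, and only a non-zero remainder forces the driver to trim the original skb and transmit a shorter last datagram itself. A tiny userspace sketch with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
            int udphdr_len = 8;
            int transport_offset = 14 + 20;  /* Ethernet + IPv4 header, assumed */
            int gso_size = 1400;             /* UDP payload bytes per segment */
            int payload = 4100;              /* total UDP payload, example */

            int headlen = transport_offset + udphdr_len;
            int skb_len = headlen + payload;
            int remaining = (skb_len - headlen) % gso_size;

            printf("full segments: %d bytes each\n", gso_size);
            printf("software-built last segment: %d bytes\n", remaining);
            /* remaining == 0: hand the skb to hardware unchanged;
             * remaining != 0: pskb_trim() the skb by 'remaining' bytes, send
             * it, and build/transmit the short last segment separately (the
             * udp_seg_rem counter tracks these cases). */
            return 0;
    }
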
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h
new file mode 100644
index 000000000000..ed42699a78b3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h
@@ -0,0 +1,14 @@
+
+#ifndef __MLX5E_EN_ACCEL_RX_TX_H__
+#define __MLX5E_EN_ACCEL_RX_TX_H__
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d167845271c3..eddd7702680b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM;
void *flow;
-
- if (direction != TLS_OFFLOAD_CTX_DIR_TX)
- return -EINVAL;
+ u32 swid;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
if (ret)
goto free_flow;
+ ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+ start_offload_tcp_sn, &swid,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+ if (ret < 0)
+ goto free_flow;
+
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- struct mlx5e_tls_offload_context *tx_ctx =
+ struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
- u32 swid;
-
- ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, &swid);
- if (ret < 0)
- goto free_flow;
tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn;
+ } else {
+ struct mlx5e_tls_offload_context_rx *rx_ctx =
+ mlx5e_get_tls_rx_context(tls_ctx);
+
+ rx_ctx->handle = htonl(swid);
}
return 0;
@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
enum tls_offload_ctx_dir direction)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ unsigned int handle;
- if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
- u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+ handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+ mlx5e_get_tls_tx_context(tls_ctx)->swid :
+ mlx5e_get_tls_rx_context(tls_ctx)->handle);
- mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
- } else {
- netdev_err(netdev, "unsupported direction %d\n", direction);
- }
+ mlx5_accel_tls_del_flow(priv->mdev, handle,
+ direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+ u32 seq, u64 rcd_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+ rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+ netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
+ be64_to_cpu(rcd_sn));
+ mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
}
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
+ .tls_dev_resync_rx = mlx5e_tls_resync_rx,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
+ u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
- netdev->features |= NETIF_F_HW_TLS_TX;
- netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ if (caps & MLX5_ACCEL_TLS_TX) {
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ }
+
+ if (caps & MLX5_ACCEL_TLS_RX) {
+ netdev->features |= NETIF_F_HW_TLS_RX;
+ netdev->hw_features |= NETIF_F_HW_TLS_RX;
+ }
+
+ if (!(caps & MLX5_ACCEL_TLS_LRO)) {
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->hw_features &= ~NETIF_F_LRO;
+ }
+
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index b6162178f621..3f5d72163b56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required;
+ atomic64_t rx_tls_drop_resync_request;
+ atomic64_t rx_tls_resync_request;
+ atomic64_t rx_tls_resync_reply;
+ atomic64_t rx_tls_auth_fail;
};
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
};
-struct mlx5e_tls_offload_context {
- struct tls_offload_context base;
+struct mlx5e_tls_offload_context_tx {
+ struct tls_offload_context_tx base;
u32 expected_seq;
__be32 swid;
};
-static inline struct mlx5e_tls_offload_context *
+static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{
- BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
- TLS_OFFLOAD_CONTEXT_SIZE);
- return container_of(tls_offload_ctx(tls_ctx),
- struct mlx5e_tls_offload_context,
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct mlx5e_tls_offload_context_tx,
+ base);
+}
+
+struct mlx5e_tls_offload_context_rx {
+ struct tls_offload_context_rx base;
+ __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ return container_of(tls_offload_ctx_rx(tls_ctx),
+ struct mlx5e_tls_offload_context_rx,
base);
}
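
The two getters above rely on the usual container_of trick: the generic TLS offload context is embedded as the first member, and the driver recovers its private struct from a pointer to that member. A standalone model of the pattern (struct names here are stand-ins, not the kernel types):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct offload_context_rx {        /* stands in for tls_offload_context_rx */
            int resync_req;
    };

    struct drv_context_rx {            /* stands in for mlx5e_tls_offload_context_rx */
            struct offload_context_rx base;
            unsigned int handle;       /* FPGA stream id, big-endian in the driver */
    };

    int main(void)
    {
            struct drv_context_rx ctx = { .handle = 42 };
            struct offload_context_rx *base = &ctx.base;
            struct drv_context_rx *back =
                    container_of(base, struct drv_context_rx, base);

            printf("recovered handle: %u\n", back->handle);
            return 0;
    }
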
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 15aef71d1957..92d37459850e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -33,6 +33,14 @@
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
+#include "accel/accel.h"
+
+#include <net/inet6_hashtables.h>
+#include <linux/ipv6.h>
+
+#define SYNDROM_DECRYPTED 0x30
+#define SYNDROM_RESYNC_REQUEST 0x31
+#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
@@ -44,10 +52,26 @@ struct sync_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct mlx5e_tls_metadata {
+struct recv_metadata_content {
+ u8 syndrome;
+ u8 reserved;
+ __be32 sync_seq;
+} __packed;
+
+struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
+} __packed;
+
+struct mlx5e_tls_metadata {
+ union {
+ /* from fpga to host */
+ struct recv_metadata_content recv;
+ /* from host to fpga */
+ struct send_metadata_content send;
+ unsigned char raw[6];
+ } __packed content;
/* packet type ID field */
__be16 ethertype;
} __packed;
@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
- pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+ pet->content.send.syndrome_swid =
+ htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
- pet->first_seq = htons(tcp_seq);
+ pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
}
static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi,
@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
u16 *pi)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_tls_offload_context *context;
+ struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
out:
return skb;
}
+
+static int tls_update_resync_sn(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_tls_metadata *mdata)
+{
+ struct sock *sk = NULL;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ __be32 seq;
+
+ if (mdata->ethertype != htons(ETH_P_IP))
+ return -EINVAL;
+
+ iph = (struct iphdr *)(mdata + 1);
+
+ th = ((void *)iph) + iph->ihl * 4;
+
+ if (iph->version == 4) {
+ sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ iph->saddr, th->source, iph->daddr,
+ th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+ sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ &ipv6h->saddr, th->source,
+ &ipv6h->daddr, th->dest,
+ netdev->ifindex, 0);
+#endif
+ }
+ if (!sk || sk->sk_state == TCP_TIME_WAIT) {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
+ goto out;
+ }
+
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+
+ memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
+ tls_offload_rx_resync_request(sk, seq);
+out:
+ return 0;
+}
+
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt)
+{
+ struct mlx5e_tls_metadata *mdata;
+ struct mlx5e_priv *priv;
+
+ if (!is_metadata_hdr_valid(skb))
+ return;
+
+ /* Use the metadata */
+ mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
+ switch (mdata->content.recv.syndrome) {
+ case SYNDROM_DECRYPTED:
+ skb->decrypted = 1;
+ break;
+ case SYNDROM_RESYNC_REQUEST:
+ tls_update_resync_sn(netdev, skb, mdata);
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
+ break;
+ case SYNDROM_AUTH_FAILED:
+ /* Authentication failure will be observed and verified by kTLS */
+ priv = netdev_priv(netdev);
+ atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
+ break;
+ default:
+ /* Bypass the metadata header to others */
+ return;
+ }
+
+ remove_metadata_hdr(skb);
+ *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
+}
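
On receive, the FPGA prepends the same 8-byte metadata, now interpreted as one syndrome byte, a reserved byte and a 32-bit TCP sequence, followed by the frame's real ethertype. A userspace sketch of the dispatch done in mlx5e_tls_handle_rx_skb(), reusing the syndrome values from the patch above:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define SYNDROM_DECRYPTED      0x30
    #define SYNDROM_RESYNC_REQUEST 0x31
    #define SYNDROM_AUTH_FAILED    0x32

    int main(void)
    {
            /* metadata as it would sit right after the two MAC addresses */
            uint8_t meta[8] = { SYNDROM_RESYNC_REQUEST, 0,
                                0x12, 0x34, 0x56, 0x78,   /* sync_seq, network order */
                                0x08, 0x00 };             /* real ethertype (IPv4) */
            uint32_t seq;

            memcpy(&seq, meta + 2, sizeof(seq));

            switch (meta[0]) {
            case SYNDROM_DECRYPTED:
                    printf("record already decrypted by hardware\n");
                    break;
            case SYNDROM_RESYNC_REQUEST:
                    printf("device lost sync, wants resync at TCP seq %u\n",
                           ntohl(seq));
                    break;
            case SYNDROM_AUTH_FAILED:
                    printf("authentication failed, let kTLS verify in software\n");
                    break;
            }
            return 0;
    }
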
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 405dfd302225..311667ec71b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ u32 *cqe_bcnt);
+
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dae4156a710d..712b9766485f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -270,12 +270,9 @@ void mlx5e_update_stats_work(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
+
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->profile->update_stats(priv);
- queue_delayed_work(priv->wq, dwork,
- msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
- }
+ priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
}
@@ -352,8 +349,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
- GFP_KERNEL, cpu_to_node(c->cpu));
+ rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*rq->mpwqe.info)),
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -670,7 +668,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err_free:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -702,7 +700,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->mpwqe.info);
+ kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -965,15 +963,15 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kfree(sq->db.di);
+ kvfree(sq->db.di);
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
- GFP_KERNEL, numa);
+ sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)),
+ GFP_KERNEL, numa);
if (!sq->db.di) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
@@ -1024,15 +1022,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
- kfree(sq->db.ico_wqe);
+ kvfree(sq->db.ico_wqe);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
- GFP_KERNEL, numa);
+ sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.ico_wqe)),
+ GFP_KERNEL, numa);
if (!sq->db.ico_wqe)
return -ENOMEM;
@@ -1077,8 +1076,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
- kfree(sq->db.wqe_info);
- kfree(sq->db.dma_fifo);
+ kvfree(sq->db.wqe_info);
+ kvfree(sq->db.dma_fifo);
}
static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
@@ -1086,10 +1085,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
- GFP_KERNEL, numa);
- sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
- GFP_KERNEL, numa);
+ sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.dma_fifo)),
+ GFP_KERNEL, numa);
+ sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
+ sizeof(*sq->db.wqe_info)),
+ GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
@@ -1893,7 +1894,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1979,7 +1980,7 @@ err_close_icosq_cq:
err_napi_del:
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
return err;
}
@@ -2018,7 +2019,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
- kfree(c);
+ kvfree(c);
}
#define DEFAULT_FRAG_SIZE (2048)
@@ -2276,7 +2277,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
- cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+ cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
if (!chs->c || !cparam)
goto err_free;
@@ -2287,7 +2288,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- kfree(cparam);
+ kvfree(cparam);
return 0;
err_close_channels:
@@ -2296,7 +2297,7 @@ err_close_channels:
err_free:
kfree(chs->c);
- kfree(cparam);
+ kvfree(cparam);
chs->num = 0;
return err;
}
@@ -3371,7 +3372,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
priv);
@@ -3405,6 +3406,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
@@ -4192,7 +4196,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_id = mlx5e_xdp_query(dev);
- xdp->prog_attached = !!xdp->prog_id;
return 0;
default:
return -EINVAL;
@@ -4592,6 +4595,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
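
Several hunks above convert per-ring arrays from kcalloc_node() to kvzalloc_node(array_size(...)), which tolerates large rings by falling back to vmalloc and guards the size multiplication against overflow. A userspace sketch of the overflow check only (the kernel's array_size() saturates to SIZE_MAX so the allocation simply fails):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    static size_t array_size_checked(size_t n, size_t elem)
    {
            if (elem && n > SIZE_MAX / elem)
                    return SIZE_MAX;   /* saturate; the allocation then fails */
            return n * elem;
    }

    int main(void)
    {
            size_t wq_sz = 1 << 14;                      /* example ring size */
            size_t bytes = array_size_checked(wq_sz, 64); /* 64-byte entries, assumed */

            printf("allocating %zu bytes for the WQE info array\n", bytes);
            return 0;
    }
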
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2b8040a3cdbd..8e3c5b4b90ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
return 0;
@@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ /* update HW stats in background for next time */
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d3a1dd20e41d..1d5295ee863c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -44,6 +44,7 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
@@ -487,7 +488,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
return 0;
@@ -601,6 +602,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (!rq->mpwqe.umr_in_progress)
mlx5e_alloc_rx_mpwqe(rq, wq->head);
+ else
+ rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
return false;
}
@@ -795,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct net_device *netdev = rq->netdev;
skb->mac_len = ETH_HLEN;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
+#endif
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1261,7 +1269,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
if (unlikely(mpwrq_is_filler_cqe(cqe))) {
- rq->stats->mpwqe_filler++;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
goto mpwrq_cqe_out;
}
@@ -1383,6 +1394,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ rq->stats->xdp_tx_cqe += i;
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -1534,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_free_rx_wqe(rq, wi);
goto wq_cyc_pop;
}
- skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+ skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) {
mlx5e_free_rx_wqe(rq, wi);
goto wq_cyc_pop;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1646859974ce..c0507fada0be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
@@ -59,6 +60,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
@@ -67,10 +69,13 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -80,6 +85,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
};
@@ -133,9 +143,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe;
s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
+ s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -145,6 +157,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
+ s->rx_congst_umr += rq_stats->congst_umr;
+ s->ch_events += ch_stats->events;
+ s->ch_poll += ch_stats->poll;
+ s->ch_arm += ch_stats->arm;
+ s->ch_aff_change += ch_stats->aff_change;
s->ch_eq_rearm += ch_stats->eq_rearm;
for (j = 0; j < priv->max_opened_tc; j++) {
@@ -157,8 +174,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
+ s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
@@ -170,6 +189,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
+ s->tx_cqes += sq_stats->cqes;
}
}
@@ -1107,12 +1127,14 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -1122,6 +1144,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1140,11 +1163,16 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 643153bb3607..fc3f66003edd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -61,6 +61,7 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
+ u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_removed_vlan_packets;
@@ -70,6 +71,7 @@ struct mlx5e_sw_stats {
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
u64 rx_xdp_tx;
+ u64 rx_xdp_tx_cqe;
u64 rx_xdp_tx_full;
u64 tx_csum_none;
u64 tx_csum_partial;
@@ -78,10 +80,13 @@ struct mlx5e_sw_stats {
u64 tx_queue_dropped;
u64 tx_xmit_more;
u64 tx_recover;
+ u64 tx_cqes;
u64 tx_queue_wake;
+ u64 tx_udp_seg_rem;
u64 tx_cqe_err;
u64 rx_wqe_err;
- u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_filler_cqes;
+ u64 rx_mpwqe_filler_strides;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -91,6 +96,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 rx_congst_umr;
+ u64 ch_events;
+ u64 ch_poll;
+ u64 ch_arm;
+ u64 ch_aff_change;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
@@ -169,9 +179,11 @@ struct mlx5e_rq_stats {
u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_tx;
+ u64 xdp_tx_cqe;
u64 xdp_tx_full;
u64 wqe_err;
- u64 mpwqe_filler;
+ u64 mpwqe_filler_cqes;
+ u64 mpwqe_filler_strides;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
@@ -181,6 +193,7 @@ struct mlx5e_rq_stats {
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
+ u64 congst_umr;
};
struct mlx5e_sq_stats {
@@ -196,6 +209,7 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
@@ -206,11 +220,16 @@ struct mlx5e_sq_stats {
u64 dropped;
u64 recover;
/* dirtied @completion */
- u64 wake ____cacheline_aligned_in_smp;
+ u64 cqes ____cacheline_aligned_in_smp;
+ u64 wake;
u64 cqe_err;
};
struct mlx5e_ch_stats {
+ u64 events;
+ u64 poll;
+ u64 arm;
+ u64 aff_change;
u64 eq_rearm;
};
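
In mlx5e_sq_stats the counters dirtied on the completion path (cqes, wake, cqe_err) now start on their own cache line, keeping them from false-sharing with the counters written on the transmit path. A userspace analogue of that layout using C11 alignas:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdalign.h>

    struct sq_stats_like {
            /* dirtied at transmit time */
            uint64_t packets;
            uint64_t bytes;
            /* dirtied at completion time, pushed to its own cache line */
            alignas(64) uint64_t cqes;
            uint64_t wake;
            uint64_t cqe_err;
    };

    int main(void)
    {
            printf("offset of cqes: %zu (a multiple of 64)\n",
                   offsetof(struct sq_stats_like, cqes));
            return 0;
    }
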
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f29deb44bf3b..9106ea45e3cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -111,10 +111,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb);
+ int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -228,7 +229,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
stats->tso_packets++;
stats->tso_bytes += skb->len - ihs;
}
@@ -443,12 +447,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
-#ifdef CONFIG_MLX5_ACCEL
/* might send skbs and update wqe and pi */
skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
if (unlikely(!skb))
return NETDEV_TX_OK;
-#endif
+
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
@@ -466,6 +469,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
+ struct mlx5e_sq_stats *stats;
struct mlx5e_txqsq *sq;
struct mlx5_cqe64 *cqe;
u32 dma_fifo_cc;
@@ -483,6 +487,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (!cqe)
return false;
+ stats = sq->stats;
+
npkts = 0;
nbytes = 0;
@@ -511,7 +517,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
queue_work(cq->channel->priv->wq,
&sq->recover.recover_work);
}
- sq->stats->cqe_err++;
+ stats->cqe_err++;
}
do {
@@ -556,6 +562,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ stats->cqes += i;
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
@@ -571,7 +579,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
MLX5E_SQ_STOP_ROOM) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
- sq->stats->wake++;
+ stats->wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1b17f682693b..4e1f99a98d5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -74,10 +74,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
+ struct mlx5e_ch_stats *ch_stats = c->stats;
bool busy = false;
int work_done = 0;
int i;
+ ch_stats->poll++;
+
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
@@ -94,6 +97,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
+ ch_stats->aff_change++;
if (budget && work_done == budget)
work_done--;
}
@@ -101,6 +105,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
+ ch_stats->arm++;
+
for (i = 0; i < c->num_tc; i++) {
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
@@ -118,8 +124,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
- cq->event_ctr++;
napi_schedule(cq->napi);
+ cq->event_ctr++;
+ cq->channel->stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index c9736238604a..5cf5f2a9d51f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr)
{
+ unsigned long flags;
int ret;
/* TLS metadata format is 1 byte for syndrome followed
@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL);
- spin_lock_irq(idr_spinlock);
+ spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
- spin_unlock_irq(idr_spinlock);
+ spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end();
return ret;
@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
spin_unlock_irqrestore(idr_spinlock, flags);
}
+static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
+ struct mlx5_fpga_device *fdev,
+ struct mlx5_fpga_dma_buf *buf, u8 status)
+{
+ kfree(buf);
+}
+
struct mlx5_teardown_stream_context {
struct mlx5_fpga_tls_command_context cmd;
u32 swid;
@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else
+ else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->idr_spinlock,
+ &fdev->tls->tx_idr_spinlock,
+ ctx->swid);
+ else
+ mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
+ &fdev->tls->rx_idr_spinlock,
ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx));
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn)
+{
+ struct mlx5_fpga_dma_buf *buf;
+ int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
+ void *flow;
+ void *cmd;
+ int ret;
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ cmd = (buf + 1);
+
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+ mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+ MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+ MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
+ MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
+ MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
+
+ buf->sg[0].data = cmd;
+ buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+ buf->complete = mlx_tls_kfree_complete;
+
+ ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+
+ return ret;
+}
+
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mlx5_fpga_tls_teardown_completion);
}
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags)
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
rcu_read_lock();
- flow = idr_find(&tls->tx_idr, swid);
+ if (direction_sx)
+ flow = idr_find(&tls->tx_idr, swid);
+ else
+ flow = idr_find(&tls->rx_idr, swid);
+
rcu_read_unlock();
if (!flow) {
@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
* the command context because we might not have received
* the tx completion yet.
*/
- mlx5_fpga_tls_del_tx_flow(fdev->mdev,
- MLX5_GET(tls_cmd, tls_cmd, swid),
- GFP_ATOMIC);
+ mlx5_fpga_tls_del_flow(fdev->mdev,
+ MLX5_GET(tls_cmd, tls_cmd, swid),
+ GFP_ATOMIC,
+ MLX5_GET(tls_cmd, tls_cmd,
+ direction_sx));
}
mlx5_fpga_tls_put_command_ctx(cmd);
@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
if (err)
goto error;
- if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
- MLX5_ACCEL_TLS_AES_GCM128))) {
+ if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP;
goto error;
}
@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr);
- spin_lock_init(&tls->idr_spinlock);
+ idr_init(&tls->rx_idr);
+ spin_lock_init(&tls->tx_idr_spinlock);
+ spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls;
return 0;
@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
return 0;
}
-static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info, u32 swid,
- u32 tcp_sn)
+static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 swid, u32 tcp_sn)
{
u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx;
@@ -533,30 +586,42 @@ out:
return ret;
}
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx)
{
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM;
u32 swid;
- ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+ if (direction_sx)
+ ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, flow);
+ else
+ ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, flow);
+
if (ret < 0)
return ret;
swid = ret;
- MLX5_SET(tls_flow, flow, direction_sx, 1);
+ MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
- ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
- start_offload_tcp_sn);
+ ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+ start_offload_tcp_sn);
if (ret && ret != -EINTR)
goto free_swid;
*p_swid = swid;
return 0;
free_swid:
- mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+ if (direction_sx)
+ mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock, swid);
+ else
+ mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock, swid);
return ret;
}
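
mlx5_fpga_tls_resync_rx() above allocates the DMA-buffer descriptor and the command payload with a single kzalloc() and points cmd just past the descriptor, so the completion callback (mlx_tls_kfree_complete) can release both with one kfree(). A userspace sketch of that single-allocation layout (names and sizes are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dma_buf_desc {          /* stands in for struct mlx5_fpga_dma_buf */
            void *data;
            size_t size;
    };

    #define COMMAND_SIZE 64        /* stands in for MLX5_TLS_COMMAND_SIZE */

    int main(void)
    {
            struct dma_buf_desc *buf = calloc(1, sizeof(*buf) + COMMAND_SIZE);
            void *cmd;

            if (!buf)
                    return 1;
            cmd = buf + 1;         /* command bytes live right after the descriptor */
            buf->data = cmd;
            buf->size = COMMAND_SIZE;
            memset(cmd, 0xab, COMMAND_SIZE);
            printf("descriptor %p, command %p\n", (void *)buf, cmd);
            free(buf);             /* one free releases both */
            return 0;
    }
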
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
index 800a214e4e49..3b2e37bf76fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
struct mlx5_fpga_conn *conn;
struct idr tx_idr;
- spinlock_t idr_spinlock; /* protects the IDR */
+ struct idr rx_idr;
+ spinlock_t tx_idr_spinlock; /* protects the IDR */
+ spinlock_t rx_idr_spinlock; /* protects the IDR */
};
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid);
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn, u32 *p_swid,
+ bool direction_sx);
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
- gfp_t flags);
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+ gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps;
}
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ u64 rcd_sn);
+
#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 0b47126815b6..2bd4c3184eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
return !wq->cur_sz;
}
+static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
+{
+ return wq->fbc.sz_m1 - wq->cur_sz;
+}
+
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
{
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0cadcabfe86f..4a1069166119 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_kvdl.o spectrum_acl_tcam.o \
- spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o spectrum_acl_flex_actions.o \
- spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum1_kvdl.o spectrum2_kvdl.o \
+ spectrum_kvdl.o \
+ spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+ spectrum_acl_atcam.o \
+ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
+ spectrum_acl.o \
+ spectrum_flower.o spectrum_cnt.o \
+ spectrum_fid.o spectrum_ipip.o \
+ spectrum_acl_flex_actions.o \
+ spectrum_acl_flex_keys.o \
+ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
+ spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 3c0d882ba183..9a473628831e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -351,9 +351,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block->first_set = mlxsw_afa_set_create(true);
if (!block->first_set)
goto err_first_set_create;
- block->cur_set = block->first_set;
+
+ /* In case user instructs to have dummy first set, we leave it
+ * empty here and create another, real, set right away.
+ */
+ if (mlxsw_afa->ops->dummy_first_set) {
+ block->cur_set = mlxsw_afa_set_create(false);
+ if (!block->cur_set)
+ goto err_second_set_create;
+ block->cur_set->prev = block->first_set;
+ block->first_set->next = block->cur_set;
+ } else {
+ block->cur_set = block->first_set;
+ }
+
return block;
+err_second_set_create:
+ mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
@@ -415,11 +430,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
- return block->first_set->kvdl_index;
+ return block->cur_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_cur_set);
+
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
+{
+ /* First set is never in KVD linear. So the first set
+ * with valid KVD linear index is always the second one.
+ */
+ if (WARN_ON(!block->first_set->next))
+ return 0;
+ return block->first_set->next->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
+
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
+{
+ u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);
+
+ return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
+ kvdl_index, activity);
}
-EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+EXPORT_SYMBOL(mlxsw_afa_block_activity_get);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
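
With ops->dummy_first_set the action block keeps an intentionally empty first set, so the first KVD-linear index that actually matters belongs to the second set, which is why mlxsw_afa_block_first_kvdl_index() dereferences first_set->next. A minimal sketch of that list shape (types simplified):

    #include <stdio.h>

    struct afa_set {
            struct afa_set *next;
            unsigned int kvdl_index;
    };

    static unsigned int first_kvdl_index(const struct afa_set *first)
    {
            /* the dummy first set is never in KVD linear; use its successor */
            return first->next ? first->next->kvdl_index : 0;
    }

    int main(void)
    {
            struct afa_set real = { .next = NULL, .kvdl_index = 0x1234 };
            struct afa_set dummy = { .next = &real, .kvdl_index = 0 };

            printf("kvdl index used by the rule: 0x%x\n",
                   first_kvdl_index(&dummy));
            return 0;
    }
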
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 3a155d104384..69628582fb4b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -45,6 +45,8 @@ struct mlxsw_afa_ops {
int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
char *enc_actions, bool is_first);
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+ int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
+ bool *activity);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
@@ -54,6 +56,7 @@ struct mlxsw_afa_ops {
bool ingress, int *p_span_id);
void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
+ bool dummy_first_set;
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -64,7 +67,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index b32a00972e83..5f8485c7640e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -43,6 +43,7 @@
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
+ const struct mlxsw_afk_ops *ops;
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
};
@@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
}
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count)
+ const struct mlxsw_afk_ops *ops)
{
struct mlxsw_afk *mlxsw_afk;
@@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
return NULL;
INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
mlxsw_afk->max_blocks = max_blocks;
- mlxsw_afk->blocks = blocks;
- mlxsw_afk->blocks_count = blocks_count;
+ mlxsw_afk->ops = ops;
+ mlxsw_afk->blocks = ops->blocks;
+ mlxsw_afk->blocks_count = ops->blocks_count;
WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
return mlxsw_afk;
}
@@ -415,61 +416,74 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
}
EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
-static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
u32 value;
value = __mlxsw_item_get32(storage, storage_item, 0);
- __mlxsw_item_set32(output_indexed, output_item, 0, value);
+ __mlxsw_item_set32(output, output_item, 0, value);
}
-static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
- const struct mlxsw_item *output_item,
- char *storage, char *output_indexed)
+static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item,
+ const struct mlxsw_item *output_item,
+ char *storage, char *output)
{
char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
- char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+ char *output_data = __mlxsw_item_data(output, output_item, 0);
size_t len = output_item->size.bytes;
memcpy(output_data, storage_data, len);
}
-#define MLXSW_AFK_KEY_BLOCK_SIZE 16
-
-static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
- int block_index, char *storage, char *output)
+static void
+mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+ char *output, char *storage)
{
- char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
- mlxsw_afk_encode_u32(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_u32(storage_item, output_item,
+ storage, output);
else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
- mlxsw_afk_encode_buf(storage_item, output_item,
- storage, output_indexed);
+ mlxsw_sp_afk_encode_buf(storage_item, output_item,
+ storage, output);
}
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16
+
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask)
{
+ char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
+ char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
- int block_index;
+ int block_index, i;
+
+ for (i = 0; i < key_info->blocks_count; i++) {
+ memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+ memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
+
+ mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+ elinst = mlxsw_afk_key_info_elinst_get(key_info,
+ element,
+ &block_index);
+ if (!elinst || block_index != i)
+ continue;
+
+ mlxsw_sp_afk_encode_one(elinst, block_key,
+ values->storage.key);
+ mlxsw_sp_afk_encode_one(elinst, block_mask,
+ values->storage.mask);
+ }
- mlxsw_afk_element_usage_for_each(element, &values->elusage) {
- elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
- &block_index);
- if (!elinst)
- continue;
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.key, key);
- mlxsw_afk_encode_one(elinst, block_index,
- values->storage.mask, mask);
+ mlxsw_afk->ops->encode_block(block_key, i, key);
+ mlxsw_afk->ops->encode_block(block_mask, i, mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
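
/* Illustrative sketch only, not part of this change: what a per-ASIC
 * encode_block callback might look like, assuming key blocks are simply
 * laid out back to back at a fixed 16-byte stride. The "example_" names
 * are hypothetical; the real Spectrum/Spectrum-2 callbacks live in
 * spectrum_acl_flex_keys.c.
 */
#define EXAMPLE_AFK_KEY_BLOCK_SIZE 16

static void example_afk_encode_block(char *block, int block_index,
				     char *output)
{
	char *output_indexed = output +
			       block_index * EXAMPLE_AFK_KEY_BLOCK_SIZE;

	memcpy(output_indexed, block, EXAMPLE_AFK_KEY_BLOCK_SIZE);
}
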
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 122506daa586..2ffde915349b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -42,16 +42,20 @@
enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -99,9 +103,11 @@ struct mlxsw_afk_element_info {
* define an internal storage geometry.
*/
static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
@@ -112,12 +118,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
};
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
@@ -208,9 +216,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
struct mlxsw_afk;
+struct mlxsw_afk_ops {
+ const struct mlxsw_afk_block *blocks;
+ unsigned int blocks_count;
+ void (*encode_block)(char *block, int block_index, char *output);
+};
+
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
- const struct mlxsw_afk_block *blocks,
- unsigned int blocks_count);
+ const struct mlxsw_afk_ops *ops);
void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
struct mlxsw_afk_key_info;
@@ -243,7 +256,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
enum mlxsw_afk_element element,
const char *key_value, const char *mask_value,
unsigned int len);
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+ struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index d65582325cd5..7461f8fe1133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -39,6 +39,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1877d9f8a11a..596fddfb3850 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -39,6 +39,7 @@
#ifndef _MLXSW_REG_H
#define _MLXSW_REG_H
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -1943,6 +1944,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
}
+/* PGCR - Policy-Engine General Configuration Register
+ * ---------------------------------------------------
+ * This register configures general Policy-Engine settings.
+ */
+#define MLXSW_REG_PGCR_ID 0x3001
+#define MLXSW_REG_PGCR_LEN 0x20
+
+MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN);
+
+/* reg_pgcr_default_action_pointer_base
+ * Default action pointer base. Each region has a default action pointer
+ * which is equal to default_action_pointer_base + region_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base)
+{
+ MLXSW_REG_ZERO(pgcr, payload);
+ mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base);
+}
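
/* Illustrative sketch only, not part of this change: programming the
 * default action pointer base at init time. With a base of, say, 0x100,
 * region 5 ends up with default action pointer 0x105. The "example_"
 * name and the base value are hypothetical; mlxsw_reg_write() is the
 * usual mlxsw register write helper.
 */
static int example_pgcr_init(struct mlxsw_core *mlxsw_core, u32 action_base)
{
	char pgcr_pl[MLXSW_REG_PGCR_LEN];

	mlxsw_reg_pgcr_pack(pgcr_pl, action_base);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pgcr), pgcr_pl);
}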
+
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2132,14 +2155,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
/* reg_ptar_action_set_type
* Type of action set to be used on this region.
- * For Spectrum, this is always type 2 - "flexible"
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+enum mlxsw_reg_ptar_key_type {
+	MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spectrum */
+ MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
/* reg_ptar_key_type
* TCAM key type for the region.
- * For Spectrum, this is always type 0x50 - "FLEX_KEY"
* Access: WO
*/
MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
@@ -2182,13 +2209,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+ enum mlxsw_reg_ptar_key_type key_type,
u16 region_size, u16 region_id,
const char *tcam_region_info)
{
MLXSW_REG_ZERO(ptar, payload);
mlxsw_reg_ptar_op_set(payload, op);
mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
- mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+ mlxsw_reg_ptar_key_type_set(payload, key_type);
mlxsw_reg_ptar_region_size_set(payload, region_size);
mlxsw_reg_ptar_region_id_set(payload, region_id);
mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
@@ -2327,6 +2355,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+/* reg_pefa_a
+ * Activity
+ * For a new entry: set if ca=0, clear if ca=1
+ * Set if a packet lookup has hit on the specific entry
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1);
+
+/* reg_pefa_ca
+ * Clear activity
+ * When write: activity is according to this field
+ * When read: after reading the activity is cleared according to ca
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
+
#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
@@ -2336,12 +2381,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
*/
MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
-static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
const char *flex_action_set)
{
MLXSW_REG_ZERO(pefa, payload);
mlxsw_reg_pefa_index_set(payload, index);
- mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+ mlxsw_reg_pefa_ca_set(payload, ca);
+ if (flex_action_set)
+ mlxsw_reg_pefa_flex_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
+{
+ *p_a = mlxsw_reg_pefa_a_get(payload);
}
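
/* Illustrative sketch only, not part of this change: reading (and
 * clearing) the activity bit of an action set in KVD linear via PEFA.
 * The "example_" name is hypothetical; mlxsw_reg_query() is the usual
 * mlxsw register read helper and MLXSW_REG_PEFA_LEN is defined earlier
 * in this file.
 */
static int example_pefa_activity_get(struct mlxsw_core *mlxsw_core,
				     u32 kvdl_index, bool *activity)
{
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	int err;

	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		return err;
	mlxsw_reg_pefa_unpack(pefa_pl, activity);
	return 0;
}
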
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
@@ -2397,6 +2450,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
*/
MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+/* reg_ptce2_priority
+ * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1.
+ * Note: priority does not have to be unique per rule.
+ * Within a region, higher priority should have lower offset (no limitation
+ * between regions in a multi-region).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
/* reg_ptce2_tcam_region_info
* Opaque object that represents the TCAM region.
* Access: Index
@@ -2432,15 +2494,244 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
const char *tcam_region_info,
- u16 offset)
+ u16 offset, u32 priority)
{
MLXSW_REG_ZERO(ptce2, payload);
mlxsw_reg_ptce2_v_set(payload, valid);
mlxsw_reg_ptce2_op_set(payload, op);
mlxsw_reg_ptce2_offset_set(payload, offset);
+ mlxsw_reg_ptce2_priority_set(payload, priority);
mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
}
+/* PERAR - Policy-Engine Region Association Register
+ * -------------------------------------------------
+ * This register associates a hw region with a region_id. Changing the
+ * association on the fly is supported by the device.
+ */
+#define MLXSW_REG_PERAR_ID 0x3026
+#define MLXSW_REG_PERAR_LEN 0x08
+
+MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN);
+
+/* reg_perar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16);
+
+static inline unsigned int
+mlxsw_reg_perar_hw_regions_needed(unsigned int block_num)
+{
+ return DIV_ROUND_UP(block_num, 4);
+}
+
+/* reg_perar_hw_region
+ * HW Region
+ * Range 0 .. cap_max_regions-1
+ * Default: hw_region = region_id
+ * For an 8 key block region, 2 consecutive regions are used
+ * For a 12 key block region, 3 consecutive regions are used
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16);
+
+static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id,
+ u16 hw_region)
+{
+ MLXSW_REG_ZERO(perar, payload);
+ mlxsw_reg_perar_region_id_set(payload, region_id);
+ mlxsw_reg_perar_hw_region_set(payload, hw_region);
+}
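
/* Illustrative sketch only, not part of this change: writing the default
 * region association, where hw_region equals region_id. A region built
 * from 12 key blocks would need DIV_ROUND_UP(12, 4) == 3 consecutive HW
 * regions per mlxsw_reg_perar_hw_regions_needed(). The "example_" name
 * is hypothetical.
 */
static int example_perar_write(struct mlxsw_core *mlxsw_core, u16 region_id)
{
	char perar_pl[MLXSW_REG_PERAR_LEN];

	mlxsw_reg_perar_pack(perar_pl, region_id, region_id);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(perar), perar_pl);
}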
+
+/* PERCR - Policy-Engine Region Configuration Register
+ * ---------------------------------------------------
+ * This register configures the region parameters. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERCR_ID 0x302A
+#define MLXSW_REG_PERCR_LEN 0x80
+
+MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN);
+
+/* reg_percr_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16);
+
+/* reg_percr_atcam_ignore_prune
+ * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1);
+
+/* reg_percr_ctcam_ignore_prune
+ * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1);
+
+/* reg_percr_bf_bypass
+ * Bloom filter bypass.
+ * 0 - Bloom filter is used (default)
+ * 1 - Bloom filter is bypassed. The bypass is an OR condition of
+ * region_id or eRP. See PERPT.bf_bypass
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1);
+
+/* reg_percr_master_mask
+ * Master mask. Logical OR mask of all masks of all rules of a region
+ * (both A-TCAM and C-TCAM). When there are no eRPs
+ * (erpt_pointer_valid = 0), then this provides the mask.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96);
+
+static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(percr, payload);
+ mlxsw_reg_percr_region_id_set(payload, region_id);
+ mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
+ mlxsw_reg_percr_bf_bypass_set(payload, true);
+ memset(payload + 0x20, 0xff, 96);
+}
+
+/* PERERP - Policy-Engine Region eRP Register
+ * ------------------------------------------
+ * This register configures the region eRP. The region_id must be
+ * allocated.
+ */
+#define MLXSW_REG_PERERP_ID 0x302B
+#define MLXSW_REG_PERERP_LEN 0x1C
+
+MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN);
+
+/* reg_pererp_region_id
+ * Region identifier.
+ * Range 0..cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16);
+
+/* reg_pererp_ctcam_le
+ * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1);
+
+/* reg_pererp_erpt_pointer_valid
+ * erpt_pointer is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1);
+
+/* reg_pererp_erpt_bank_pointer
+ * Pointer to eRP table bank. May be modified at any time.
+ * Range 0..cap_max_erp_table_banks-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4);
+
+/* reg_pererp_erpt_pointer
+ * Pointer to eRP table within the eRP bank. Can be changed for an
+ * existing region.
+ * Range 0..cap_max_erp_table_size-1
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8);
+
+/* reg_pererp_erpt_vector
+ * Vector of allowed eRP indexes starting from erpt_pointer within the
+ * erpt_bank_pointer. Next entries will be in next bank.
+ * Note that eRP index is used and not eRP ID.
+ * Reserved when erpt_pointer_valid = 0
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1);
+
+/* reg_pererp_master_rp_id
+ * Master RP ID. When there are no eRPs, then this provides the eRP ID
+ * for the lookup. Can be changed for an existing region.
+ * Reserved when erpt_pointer_valid = 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4);
+
+static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id)
+{
+ MLXSW_REG_ZERO(pererp, payload);
+ mlxsw_reg_pererp_region_id_set(payload, region_id);
+ mlxsw_reg_pererp_ctcam_le_set(payload, true);
+ mlxsw_reg_pererp_erpt_pointer_valid_set(payload, true);
+}
+
+/* IEDR - Infrastructure Entry Delete Register
+ * -------------------------------------------
+ * This register is used for deleting entries from the entry tables.
+ * It is legitimate to attempt to delete a non-existent entry (the device
+ * responds with success).
+ */
+#define MLXSW_REG_IEDR_ID 0x3804
+#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_IEDR_REC_MAX_COUNT 64
+#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \
+ MLXSW_REG_IEDR_REC_LEN * \
+ MLXSW_REG_IEDR_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN);
+
+/* reg_iedr_num_rec
+ * Number of records.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8);
+
+/* reg_iedr_rec_type
+ * Resource type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_size
+ * Size of entries to be deleted. The unit is 1 entry, regardless of entry type.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11,
+ MLXSW_REG_IEDR_REC_LEN, 0x00, false);
+
+/* reg_iedr_rec_index_start
+ * Resource index start.
+ * Access: OP
+ */
+MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24,
+ MLXSW_REG_IEDR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_iedr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(iedr, payload);
+}
+
+static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index,
+ u8 rec_type, u16 rec_size,
+ u32 rec_index_start)
+{
+ u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type);
+ mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size);
+ mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start);
+}
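
/* Illustrative sketch only, not part of this change: deleting one range
 * of entries through IEDR. The "example_" name and the caller-supplied
 * record type are hypothetical.
 */
static int example_iedr_delete_range(struct mlxsw_core *mlxsw_core,
				     u8 rec_type, u16 entry_count,
				     u32 start_index)
{
	char iedr_pl[MLXSW_REG_IEDR_LEN];

	mlxsw_reg_iedr_pack(iedr_pl);
	mlxsw_reg_iedr_rec_pack(iedr_pl, 0, rec_type, entry_count, start_index);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(iedr), iedr_pl);
}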
+
/* QPCR - QoS Policer Configuration Register
* -----------------------------------------
* The QPCR register is used to create policers - that limit
@@ -3350,6 +3641,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
@@ -3508,6 +3800,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4338,6 +4692,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
*/
MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
/* VLAN Interface */
/* reg_ritr_vlan_if_vid
@@ -7871,6 +8239,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
+ MLXSW_REG(pgcr),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -7879,6 +8248,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(prcr),
MLXSW_REG(pefa),
MLXSW_REG(ptce2),
+ MLXSW_REG(perar),
+ MLXSW_REG(percr),
+ MLXSW_REG(pererp),
+ MLXSW_REG(iedr),
MLXSW_REG(qpcr),
MLXSW_REG(qtct),
MLXSW_REG(qeec),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index fd9299ccec72..f672a7b71de7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -42,6 +42,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+ MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_CQE_V0,
MLXSW_RES_ID_CQE_V1,
@@ -83,6 +85,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+ [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_CQE_V0] = 0x2210,
[MLXSW_RES_ID_CQE_V1] = 0x2211,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 968b88af2ef5..317c92d6496e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -74,17 +74,25 @@
#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
-#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1620
-#define MLXSW_FWREV_SUBMINOR 192
-#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
-#define MLXSW_SP_FW_FILENAME \
- "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
- "." __stringify(MLXSW_FWREV_MINOR) \
- "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_MINOR 1620
+#define MLXSW_SP1_FWREV_SUBMINOR 192
-static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+ .major = MLXSW_SP1_FWREV_MAJOR,
+ .minor = MLXSW_SP1_FWREV_MINOR,
+ .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+ "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP1_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+
+static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
@@ -338,29 +346,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
+ const char *fw_filename = mlxsw_sp->fw_filename;
const struct firmware *firmware;
int err;
+ /* Don't check if driver does not require it */
+ if (!req_rev || !fw_filename)
+ return 0;
+
/* Validate driver & FW are compatible */
- if (rev->major != MLXSW_FWREV_MAJOR) {
+ if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, MLXSW_FWREV_MAJOR);
+ rev->major, req_rev->major);
return -EINVAL;
}
- if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
+ if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
- err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ err = request_firmware_direct(&firmware, fw_filename,
mlxsw_sp->bus_info->dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- MLXSW_SP_FW_FILENAME);
+ fw_filename);
return err;
}
@@ -1503,7 +1517,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tcf_block *block, bool ingress)
+ struct tcf_block *block, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_block *acl_block;
@@ -1518,7 +1533,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
block_cb = __tcf_block_cb_register(block,
mlxsw_sp_setup_tc_block_cb_flower,
- mlxsw_sp, acl_block);
+ mlxsw_sp, acl_block, extack);
if (IS_ERR(block_cb)) {
err = PTR_ERR(block_cb);
goto err_cb_register;
@@ -1541,7 +1556,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
err_block_bind:
if (!tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
err_cb_register:
mlxsw_sp_acl_block_destroy(acl_block);
}
@@ -1571,7 +1586,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
mlxsw_sp_port, ingress);
if (!err && !tcf_block_cb_decref(block_cb)) {
- __tcf_block_cb_unregister(block_cb);
+ __tcf_block_cb_unregister(block, block_cb);
mlxsw_sp_acl_block_destroy(acl_block);
}
}
@@ -1596,11 +1611,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
switch (f->command) {
case TC_BLOCK_BIND:
err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, f->extack);
if (err)
return err;
err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
- f->block, ingress);
+ f->block, ingress,
+ f->extack);
if (err) {
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return err;
@@ -1712,7 +1728,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ sizeof(drvinfo->driver));
strlcpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1873,6 +1890,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+ {
+ .str = "ether_pkts64octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+ },
+ {
+ .str = "ether_pkts65to127octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+ },
+ {
+ .str = "ether_pkts128to255octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+ },
+ {
+ .str = "ether_pkts256to511octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+ },
+ {
+ .str = "ether_pkts512to1023octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+ },
+ {
+ .str = "ether_pkts1024to1518octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+ },
+ {
+ .str = "ether_pkts1519to2047octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+ },
+ {
+ .str = "ether_pkts2048to4095octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+ },
+ {
+ .str = "ether_pkts4096to8191octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+ },
+ {
+ .str = "ether_pkts8192to10239octets",
+ .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1964,6 +2027,11 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2003,10 +2071,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
switch (grp) {
- case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2819_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2056,6 +2128,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2819 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -3371,6 +3448,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
false, SP_IP2ME, DISCARD),
@@ -3757,6 +3836,36 @@ err_fids_init:
return err;
}
+static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
+static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+}
+
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3777,7 +3886,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
-static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
+static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
@@ -3803,6 +3912,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
+static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
+ .used_max_mid = 1,
+ .max_mid = MLXSW_SP_MID_MAX,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 3,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 3,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3839,7 +3970,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
DEVLINK_RESOURCE_UNIT_ENTRY);
}
-static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct devlink_resource_size_params hash_single_size_params;
@@ -3850,7 +3981,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
const struct mlxsw_config_profile *profile;
int err;
- profile = &mlxsw_sp_config_profile;
+ profile = &mlxsw_sp1_config_profile;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
@@ -3876,7 +4007,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
- err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+ err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -3905,6 +4036,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+}
+
+static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ return 0;
+}
+
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
@@ -3960,10 +4101,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = mlxsw_sp_driver_name,
+static struct mlxsw_driver mlxsw_sp1_driver = {
+ .kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
+ .init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
@@ -3979,10 +4120,35 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
- .resources_register = mlxsw_sp_resources_register,
+ .resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .profile = &mlxsw_sp1_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp2_driver = {
+ .kind = mlxsw_sp2_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
};
@@ -4397,7 +4563,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev)) {
+ !netif_is_ovs_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
@@ -4423,6 +4590,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4461,6 +4633,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
}
break;
}
@@ -4545,8 +4720,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
+ if (!netif_is_bridge_master(upper_dev) &&
+ !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4558,6 +4734,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4571,6 +4752,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ } else if (netif_is_macvlan(upper_dev)) {
+ if (!info->linking)
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4620,6 +4804,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+
+ if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+
+ return -EOPNOTSUPP;
+}
+
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info = ptr;
@@ -4661,6 +4903,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (netif_is_macvlan(dev))
+ err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
return notifier_from_errno(err);
}
@@ -4681,14 +4927,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_event,
};
-static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
+static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
};
-static struct pci_driver mlxsw_sp_pci_driver = {
- .name = mlxsw_sp_driver_name,
- .id_table = mlxsw_sp_pci_id_table,
+static struct pci_driver mlxsw_sp1_pci_driver = {
+ .name = mlxsw_sp1_driver_name,
+ .id_table = mlxsw_sp1_pci_id_table,
+};
+
+static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp2_pci_driver = {
+ .name = mlxsw_sp2_driver_name,
+ .id_table = mlxsw_sp2_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
@@ -4700,19 +4956,31 @@ static int __init mlxsw_sp_module_init(void)
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
+ if (err)
+ goto err_sp1_core_driver_register;
+
+ err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
+ if (err)
+ goto err_sp2_core_driver_register;
+
+ err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
- goto err_core_driver_register;
+ goto err_sp1_pci_driver_register;
- err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
+ err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
if (err)
- goto err_pci_driver_register;
+ goto err_sp2_pci_driver_register;
return 0;
-err_pci_driver_register:
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
-err_core_driver_register:
+err_sp2_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+err_sp2_core_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
+err_sp1_core_driver_register:
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4722,8 +4990,10 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
- mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
- mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
@@ -4736,5 +5006,6 @@ module_exit(mlxsw_sp_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
-MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
-MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4a519d8edec8..016058961542 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -145,6 +145,9 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -168,6 +171,13 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ const struct mlxsw_fw_rev *req_rev;
+ const char *fw_filename;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ const struct mlxsw_afa_ops *afa_ops;
+ const struct mlxsw_afk_ops *afk_ops;
+ const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+ const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
};
static inline struct mlxsw_sp_upper *
@@ -407,6 +417,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
@@ -435,15 +447,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+ switch (type) {
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ default:
+ return 1;
+ }
+}
+
+struct mlxsw_sp_kvdl_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+ void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+ int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+ int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index);
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size);
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+
+/* spectrum2_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -452,44 +511,14 @@ struct mlxsw_sp_acl_rule_info {
unsigned int counter_index;
};
-enum mlxsw_sp_acl_profile {
- MLXSW_SP_ACL_PROFILE_FLOWER,
-};
-
-struct mlxsw_sp_acl_profile_ops {
- size_t ruleset_priv_size;
- int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv);
- void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
- int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
- u16 (*ruleset_group_id)(void *ruleset_priv);
- size_t rule_priv_size;
- int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv, void *rule_priv,
- struct mlxsw_sp_acl_rule_info *rulei);
- void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
- int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
- bool *activity);
-};
-
-struct mlxsw_sp_acl_ops {
- size_t priv_size;
- int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
- void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
- const struct mlxsw_sp_acl_profile_ops *
- (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_acl_profile profile);
-};
-
struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
+enum mlxsw_sp_acl_profile {
+ MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -582,7 +611,51 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
/* spectrum_acl_tcam.c */
-extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+ enum mlxsw_reg_ptar_key_type key_type;
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ size_t region_priv_size;
+ int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ struct mlxsw_sp_acl_tcam_region *region);
+ void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+ int (*region_associate)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+ size_t chunk_priv_size;
+ void (*chunk_init)(void *region_priv, void *chunk_priv,
+ unsigned int priority);
+ void (*chunk_fini)(void *chunk_priv);
+ size_t entry_priv_size;
+ int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv);
+ int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity);
+};
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum2_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -631,4 +704,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+ size_t priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ void (*fini)(void *priv);
+ size_t route_priv_size;
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block);
+};
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
+/* spectrum2_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644
index 000000000000..d339ec43d79c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -0,0 +1,253 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+ struct mlxsw_sp_acl_tcam_region *region;
+ struct {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_sp_acl_rule_info *rulei;
+ } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+ int err;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+ &region->catchall.cchunk,
+ MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ if (IS_ERR(rulei)) {
+ err = PTR_ERR(rulei);
+ goto err_rulei_create;
+ }
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+ err = mlxsw_sp_acl_rulei_commit(rulei);
+ if (err)
+ goto err_rulei_commit;
+ err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry,
+ rulei, false);
+ if (err)
+ goto err_entry_add;
+ region->catchall.rulei = rulei;
+ return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+ mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_acl_tcam_region *region)
+{
+ struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &region->catchall.cchunk,
+ &region->catchall.centry);
+ mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region);
+ if (err)
+ return err;
+ err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+ if (err)
+ goto err_catchall_add;
+ region->region = _region;
+ return 0;
+
+err_catchall_add:
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+ return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *_region,
+ unsigned int offset,
+ bool *activity)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ int err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+ _region->tcam_region_info, offset, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ return err;
+ *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+ return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+ unsigned int offset;
+
+ offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+ return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+ region->region,
+ offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+ .priv_size = 0,
+ .init = mlxsw_sp1_acl_tcam_init,
+ .fini = mlxsw_sp1_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region),
+ .region_init = mlxsw_sp1_acl_tcam_region_init,
+ .region_fini = mlxsw_sp1_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp1_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp1_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+ .entry_add = mlxsw_sp1_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644
index 000000000000..0d4583894a97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -0,0 +1,459 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+ (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+ (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
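+
+/* With the sizes above (before any resizing via devlink resources), the KVD
+ * linear space is laid out as: SINGLE covers indexes 0..16383 and is
+ * allocated one entry at a time, CHUNKS covers 16384..65535 in allocations
+ * of 32 entries, and LARGE_CHUNKS covers 65536..MLXSW_SP_KVD_LINEAR_SIZE - 1
+ * in allocations of 512 entries.
+ */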
+
+struct mlxsw_sp1_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+ MLXSW_SP1_KVDL_PART_ID_SINGLE,
+ MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id) \
+[MLXSW_SP1_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP1_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP1_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
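+
+/* For example, MLXSW_SP1_KVDL_PART_INFO(SINGLE) expands to an initializer
+ * for [MLXSW_SP1_KVDL_PART_ID_SINGLE] with .start_index = 0,
+ * .end_index = 16383, .alloc_size = 1 and
+ * .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE.
+ */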
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+ MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
+
+struct mlxsw_sp1_kvdl_part {
+ struct mlxsw_sp1_kvdl_part_info info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+ struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
+ (!min_part ||
+ part->info.alloc_size <= min_part->info.alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp1_kvdl_part *part;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
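+
+/* For example, in the CHUNKS partition (start_index 16384, alloc_size 32),
+ * entry_index 2 maps to kvdl_index 16384 + 2 * 32 = 16448, and kvdl_index
+ * 16448 maps back to entry_index (16448 - 16384) / 32 = 2.
+ */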
+
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+ u32 kvdl_index)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
+
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ /* Find partition with smallest allocation size satisfying the
+ * requested size.
+ */
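+ /* E.g. a request for 12 entries is served from the CHUNKS
+ * partition (alloc_size 32): SINGLE (alloc_size 1) is too small
+ * and LARGE_CHUNKS (alloc_size 512) is not the smallest fit.
+ */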
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info.alloc_size;
+
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+ struct mlxsw_sp1_kvdl_part *part_prev,
+ unsigned int size)
+{
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
+ }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp1_kvdl_part_info *info,
+ struct mlxsw_sp1_kvdl_part *part_prev)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl_part *part;
+ bool need_update = true;
+ unsigned int nr_entries;
+ size_t usage_size;
+ u64 resource_size;
+ int err;
+
+ err = devlink_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&part->info, info, sizeof(part->info));
+
+ if (need_update)
+ mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+ return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_kvdl *kvdl)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info;
+ struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+ int err, i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp1_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+ const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += info->alloc_size;
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+ return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+ const struct mlxsw_sp1_kvdl *kvdl = priv;
+ struct mlxsw_sp1_kvdl_part *part;
+
+ part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+ int err;
+
+ err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+ if (err)
+ return err;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
+ return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp1_kvdl *kvdl = priv;
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
+ mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+ .init = mlxsw_sp1_kvdl_init,
+ .fini = mlxsw_sp1_kvdl_fini,
+ .alloc = mlxsw_sp1_kvdl_alloc,
+ .free = mlxsw_sp1_kvdl_free,
+ .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 000000000000..fc649fe7134e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
@@ -0,0 +1,374 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+ struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+};
+
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ key->vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+ int err;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
+ return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+ struct mlxsw_sp1_mr_tcam_route *route,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ int err;
+
+ err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+ key, prio);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+ mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp1_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp1_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+ .init = mlxsw_sp1_mr_tcam_init,
+ .fini = mlxsw_sp1_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+ .route_create = mlxsw_sp1_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
new file mode 100644
index 000000000000..d7f1fb35ea2a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -0,0 +1,222 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+#include "core_acl_flex_actions.h"
+
+struct mlxsw_sp2_acl_tcam {
+ u32 kvdl_index;
+ unsigned int kvdl_count;
+};
+
+struct mlxsw_sp2_acl_tcam_region {
+ struct mlxsw_sp_acl_ctcam_region cregion;
+};
+
+struct mlxsw_sp2_acl_tcam_chunk {
+ struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp2_acl_tcam_entry {
+ struct mlxsw_sp_acl_ctcam_entry centry;
+ struct mlxsw_afa_block *act_block;
+};
+
+static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+ struct mlxsw_sp_acl_tcam *_tcam)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+ struct mlxsw_afa_block *afa_block;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ char pgcr_pl[MLXSW_REG_PGCR_LEN];
+ char *enc_actions;
+ int i;
+ int err;
+
+ tcam->kvdl_count = _tcam->max_regions;
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, &tcam->kvdl_index);
+ if (err)
+ return err;
+
+ /* Create a flex action block and set the default action (continue),
+ * but don't commit it. We only need the current set encoding, which
+ * is then written using the PEFA register to all indexes of all
+ * regions.
+ */
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block) {
+ err = -ENOMEM;
+ goto err_afa_block;
+ }
+ err = mlxsw_afa_block_continue(afa_block);
+ if (WARN_ON(err))
+ goto err_afa_block_continue;
+ enc_actions = mlxsw_afa_block_cur_set(afa_block);
+
+ for (i = 0; i < tcam->kvdl_count; i++) {
+ mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
+ true, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ }
+ mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
+ if (err)
+ goto err_pgcr_write;
+
+ mlxsw_afa_block_destroy(afa_block);
+ return 0;
+
+err_pgcr_write:
+err_pefa_write:
+err_afa_block_continue:
+ mlxsw_afa_block_destroy(afa_block);
+err_afa_block:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_acl_tcam *tcam = priv;
+
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ tcam->kvdl_count, tcam->kvdl_index);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+ struct mlxsw_sp_acl_tcam_region *_region)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ int err;
+
+ err = mlxsw_sp_acl_atcam_region_init(mlxsw_sp, _region);
+ if (err)
+ return err;
+ return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+ _region);
+}
+
+static void
+mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+
+ mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static int
+mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+ unsigned int priority)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+ priority);
+}
+
+static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
+{
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+
+ mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry,
+ rulei, true);
+}
+
+static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+ &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *entry_priv,
+ bool *activity)
+{
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ return mlxsw_afa_block_activity_get(entry->act_block, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
+ .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
+ .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
+ .init = mlxsw_sp2_acl_tcam_init,
+ .fini = mlxsw_sp2_acl_tcam_fini,
+ .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
+ .region_init = mlxsw_sp2_acl_tcam_region_init,
+ .region_fini = mlxsw_sp2_acl_tcam_region_fini,
+ .region_associate = mlxsw_sp2_acl_tcam_region_associate,
+ .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
+ .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
+ .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
+ .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
+ .entry_add = mlxsw_sp2_acl_tcam_entry_add,
+ .entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
new file mode 100644
index 000000000000..bacf7483c3fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -0,0 +1,302 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "resources.h"
+
+struct mlxsw_sp2_kvdl_part_info {
+ u8 res_type;
+ /* For each defined partition we need to know how many
+ * usage bits we need and how many indexes are represented
+ * by a single bit. This can be obtained from FW by querying
+ * the appropriate resources, so keep the resource ids for
+ * this purpose in the partition definition.
+ */
+ enum mlxsw_res_id usage_bit_count_res_id;
+ enum mlxsw_res_id index_range_res_id;
+};
+
+#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \
+ _usage_bit_count_res_id, _index_range_res_id) \
+[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \
+ .res_type = _res_type, \
+ .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \
+ .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \
+}
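+
+/* For example, the ADJ entry below expands to { .res_type = 0x21,
+ * .usage_bit_count_res_id = MLXSW_RES_ID_KVD_SIZE,
+ * .index_range_res_id = MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE }.
+ */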
+
+static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
+ MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE),
+ MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS,
+ MAX_KVD_ACTION_SETS),
+ MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+};
+
+#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
+
+struct mlxsw_sp2_kvdl_part {
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ unsigned int usage_bit_count;
+ unsigned int indexes_per_usage_bit;
+ unsigned int last_allocated_bit;
+ unsigned long usage[0]; /* Usage bits */
+};
+
+struct mlxsw_sp2_kvdl {
+ struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN];
+};
+
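+/* Search for a run of bit_count free usage bits, starting just after
+ * part->last_allocated_bit and wrapping around to the start of the bitmap
+ * at most once (next-fit); returns -ENOBUFS when no such run is found.
+ */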
+static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int bit_count,
+ unsigned int *p_bit)
+{
+ unsigned int start_bit;
+ unsigned int bit;
+ unsigned int i;
+ bool wrap = false;
+
+ start_bit = part->last_allocated_bit + 1;
+ if (start_bit == part->usage_bit_count)
+ start_bit = 0;
+ bit = start_bit;
+again:
+ bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
+ if (!wrap && bit + bit_count >= part->usage_bit_count) {
+ wrap = true;
+ bit = 0;
+ goto again;
+ }
+ if (wrap && bit + bit_count >= start_bit)
+ return -ENOBUFS;
+ for (i = 0; i < bit_count; i++) {
+ if (test_bit(bit + i, part->usage)) {
+ bit += bit_count;
+ goto again;
+ }
+ }
+ *p_bit = bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size,
+ u32 *p_kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit);
+ if (err)
+ return err;
+ for (i = 0; i < bit_count; i++)
+ __set_bit(bit + i, part->usage);
+ *p_kvdl_index = bit * part->indexes_per_usage_bit;
+ return 0;
+}
+
+static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type,
+ u16 size, u32 kvdl_index)
+{
+ char *iedr_pl;
+ int err;
+
+ iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL);
+ if (!iedr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_iedr_pack(iedr_pl);
+ mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl);
+ kfree(iedr_pl);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl_part *part,
+ unsigned int size, u32 kvdl_index)
+{
+ unsigned int bit_count;
+ unsigned int bit;
+ unsigned int i;
+ int err;
+
+ /* We need to ask FW to delete the previously used KVD linear index. */
+ err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type,
+ size, kvdl_index);
+ if (err)
+ return;
+
+ bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit);
+ bit = kvdl_index / part->indexes_per_usage_bit;
+ for (i = 0; i < bit_count; i++)
+ __clear_bit(bit + i, part->usage);
+}
+
+static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ u32 *p_entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index);
+}
+
+static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ int entry_index)
+{
+ unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type);
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+ struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type];
+
+ return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index);
+}
+
+static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ void *priv,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
+{
+ *p_alloc_count = entry_count;
+ return 0;
+}
+
+static struct mlxsw_sp2_kvdl_part *
+mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp2_kvdl_part_info *info)
+{
+ unsigned int indexes_per_usage_bit;
+ struct mlxsw_sp2_kvdl_part *part;
+ unsigned int index_range;
+ unsigned int usage_bit_count;
+ size_t usage_size;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core,
+ info->usage_bit_count_res_id) ||
+ !mlxsw_core_res_valid(mlxsw_sp->core,
+ info->index_range_res_id))
+ return ERR_PTR(-EIO);
+ usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core,
+ info->usage_bit_count_res_id);
+ index_range = mlxsw_core_res_get(mlxsw_sp->core,
+ info->index_range_res_id);
+
+ /* For some partitions, one usage bit represents a group of indexes.
+ * That's why we compute the number of indexes per usage bit here,
+ * according to queried resources.
+ */
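+ /* E.g. with hypothetical values index_range = 0x100000 and
+ * usage_bit_count = 0x20000, each usage bit would cover eight
+ * consecutive KVDL indexes.
+ */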
+ indexes_per_usage_bit = index_range / usage_bit_count;
+
+ usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return ERR_PTR(-ENOMEM);
+ part->info = info;
+ part->usage_bit_count = usage_bit_count;
+ part->indexes_per_usage_bit = indexes_per_usage_bit;
+ part->last_allocated_bit = usage_bit_count - 1;
+ return part;
+}
+
+static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part)
+{
+ kfree(part);
+}
+
+static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp2_kvdl *kvdl)
+{
+ const struct mlxsw_sp2_kvdl_part_info *info;
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp2_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
+ goto err_kvdl_part_init;
+ }
+ }
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+ return err;
+}
+
+static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl);
+}
+
+static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp2_kvdl *kvdl = priv;
+
+ mlxsw_sp2_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_kvdl),
+ .init = mlxsw_sp2_kvdl_init,
+ .fini = mlxsw_sp2_kvdl_fini,
+ .alloc = mlxsw_sp2_kvdl_alloc,
+ .free = mlxsw_sp2_kvdl_free,
+ .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
new file mode 100644
index 000000000000..53d4ab70da95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -0,0 +1,82 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+
+#include "core_acl_flex_actions.h"
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+static int
+mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key)
+{
+}
+
+static int
+mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ return 0;
+}
+
+static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ return 0;
+}
+
+static void mlxsw_sp2_mr_tcam_fini(void *priv)
+{
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .init = mlxsw_sp2_mr_tcam_init,
+ .fini = mlxsw_sp2_mr_tcam_fini,
+ .route_create = mlxsw_sp2_mr_tcam_route_create,
+ .route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
+ .route_update = mlxsw_sp2_mr_tcam_route_update,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 79b1fa27a9a4..217621d79e26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -48,13 +48,12 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#include "spectrum_acl_flex_keys.h"
+#include "spectrum_acl_tcam.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_sp_fid *dummy_fid;
- const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
@@ -62,8 +61,7 @@ struct mlxsw_sp_acl {
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
- unsigned long priv[0];
- /* priv has to be always the last item */
+ struct mlxsw_sp_acl_tcam tcam;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
@@ -339,7 +337,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_init;
- err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+ err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv);
if (err)
goto err_ops_ruleset_add;
@@ -409,7 +407,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
@@ -427,7 +425,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- ops = acl->ops->profile_ops(mlxsw_sp, profile);
+ ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
@@ -487,7 +485,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority)
{
- rulei->priority = priority;
+ rulei->priority = priority >> 16;
}
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
@@ -634,7 +632,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
- rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+ GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto err_alloc;
@@ -825,20 +824,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
- const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
+ size_t alloc_size;
int err;
- acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+ alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+ acl = kzalloc(alloc_size, GFP_KERNEL);
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
- mlxsw_sp_afk_blocks,
- MLXSW_SP_AFK_BLOCKS_COUNT);
+ mlxsw_sp->afk_ops);
if (!acl->afk) {
err = -ENOMEM;
goto err_afk_create;
@@ -857,12 +856,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
acl->dummy_fid = fid;
INIT_LIST_HEAD(&acl->rules);
- err = acl_ops->init(mlxsw_sp, acl->priv);
+ err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
if (err)
goto err_acl_ops_init;
- acl->ops = acl_ops;
-
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
@@ -884,10 +881,9 @@ err_afk_create:
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
- acl_ops->fini(mlxsw_sp, acl->priv);
+ mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
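
Editor's note: the allocation in mlxsw_sp_acl_init changes shape here. The generic ACL struct now embeds struct mlxsw_sp_acl_tcam directly instead of ending in its own priv[0] area, and the variable-size private region moves one level down into the TCAM struct (see the later ops->init(mlxsw_sp, tcam->priv, tcam) call), while the top-level kzalloc still reserves sizeof(*acl) plus the per-ASIC private size. A small userspace sketch of this "embedded struct with trailing private area" layout, using hypothetical types and the GNU zero-length array the kernel relies on:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for mlxsw_sp_acl_tcam / mlxsw_sp_acl. */
struct tcam {
	unsigned int max_regions;
	unsigned long priv[0];	/* per-ASIC data lives here; must be last */
};

struct acl {
	unsigned int rule_count;
	struct tcam tcam;	/* must stay the last member of acl */
};

int main(void)
{
	size_t priv_size = 24;	/* would come from the per-ASIC ops table */
	struct acl *acl;

	/* One allocation covers acl, the embedded tcam and tcam's priv area. */
	acl = calloc(1, sizeof(*acl) + priv_size);
	if (!acl)
		return 1;

	memset(acl->tcam.priv, 0xab, priv_size);	/* private area is usable */
	printf("total size: %zu bytes\n", sizeof(*acl) + priv_size);
	free(acl);
	return 0;
}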
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
new file mode 100644
index 000000000000..b1b3b0e0c00e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -0,0 +1,95 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char perar_pl[MLXSW_REG_PERAR_LEN];
+ /* For now, just assume that every region has 12 key blocks */
+ u16 hw_region = region_id * 3;
+ u64 max_regions;
+
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+ if (hw_region >= max_regions)
+ return -ENOBUFS;
+
+ mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl);
+}
+
+static int mlxsw_sp_acl_atcam_region_param_init(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char percr_pl[MLXSW_REG_PERCR_LEN];
+
+ mlxsw_reg_percr_pack(percr_pl, region_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl);
+}
+
+static int
+mlxsw_sp_acl_atcam_region_erp_init(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id)
+{
+ char pererp_pl[MLXSW_REG_PERERP_LEN];
+
+ mlxsw_reg_pererp_pack(pererp_pl, region_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl);
+}
+
+int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ int err;
+
+ err = mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
+ if (err)
+ return err;
+ err = mlxsw_sp_acl_atcam_region_param_init(mlxsw_sp, region->id);
+ if (err)
+ return err;
+ err = mlxsw_sp_acl_atcam_region_erp_init(mlxsw_sp, region->id);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 000000000000..ef0d4c0a5a1f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -0,0 +1,215 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 new_size)
+{
+ char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+ region->key_type, new_size, region->id,
+ region->tcam_region_info);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ u16 src_offset, u16 dst_offset, u16 size)
+{
+ char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+ mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+ region->tcam_region_info, src_offset,
+ region->tcam_region_info, dst_offset, size);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+ u32 priority;
+ char *mask;
+ char *key;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+ fillup_priority);
+ if (err)
+ return err;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info, offset, priority);
+ key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+ mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
+
+ /* Only the first action set belongs here, the rest is in KVD */
+ act_set = mlxsw_afa_block_first_set(rulei->act_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region,
+ unsigned int offset)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+ region->tcam_region_info, offset, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+ struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+ struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+ mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+ from_index, to_index, count);
+}
+
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+ .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_acl_ctcam_region_parman_resize,
+ .move = mlxsw_sp_acl_ctcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region)
+{
+ cregion->region = region;
+ cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+ cregion);
+ if (!cregion->parman)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+ parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority)
+{
+ parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+ parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority)
+{
+ int err;
+
+ err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region,
+ centry->parman_item.index,
+ rulei, fillup_priority);
+ if (err)
+ goto err_rule_insert;
+ return 0;
+
+err_rule_insert:
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+ return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region,
+ centry->parman_item.index);
+ parman_item_remove(cregion->parman, &cchunk->parman_prio,
+ &centry->parman_item);
+}
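
Editor's note: mlxsw_sp_acl_ctcam_entry_add() above follows a common two-step pattern: first reserve an index in the software-managed priority array (parman), then program the hardware entry at that index, and release the index again if the hardware write fails. A compact userspace model of that reserve/program/unwind flow (illustrative only; the allocator here is a trivial bitmap, not parman, and the "hardware write" is faked):

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8

static bool used[SLOTS];

/* Reserve the first free slot; stands in for parman_item_add(). */
static int slot_reserve(void)
{
	for (int i = 0; i < SLOTS; i++) {
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	}
	return -1;
}

static void slot_release(int i)
{
	used[i] = false;
}

/* Pretend hardware write; stands in for the PTCE2 register write. */
static int hw_entry_write(int index, bool fail)
{
	if (fail)
		return -1;
	printf("programmed entry at index %d\n", index);
	return 0;
}

/* Reserve a slot, program it, and unwind the reservation on failure. */
static int entry_add(bool make_hw_fail)
{
	int index = slot_reserve();
	int err;

	if (index < 0)
		return -1;

	err = hw_entry_write(index, make_hw_fail);
	if (err) {
		slot_release(index);	/* undo the reservation */
		return err;
	}
	return 0;
}

int main(void)
{
	entry_add(false);	/* succeeds */
	entry_add(true);	/* hardware write "fails", slot is released */
	return 0;
}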
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 510ce48d87f7..bca0def756cd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -37,10 +37,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_span.h"
-#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
-
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
+ char *enc_actions, bool is_first, bool ca)
{
struct mlxsw_sp *mlxsw_sp = priv;
char pefa_pl[MLXSW_REG_PEFA_LEN];
@@ -53,11 +51,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
if (is_first)
return 0;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
- &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, &kvdl_index);
if (err)
return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
if (err)
goto err_pefa_write;
@@ -65,10 +63,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
return 0;
err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
return err;
}
+static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, false);
+}
+
+static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions,
+ is_first, true);
+}
+
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
bool is_first)
{
@@ -76,7 +89,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
if (is_first)
return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+ 1, kvdl_index);
+}
+
+static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
+ bool *activity)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ int err;
+
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_unpack(pefa_pl, activity);
+ return 0;
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
@@ -87,7 +122,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
u32 kvdl_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, &kvdl_index);
if (err)
return err;
mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
@@ -98,7 +134,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
return 0;
err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
return err;
}
@@ -106,7 +143,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
struct mlxsw_sp *mlxsw_sp = priv;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+ 1, kvdl_index);
}
static int
@@ -154,22 +192,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
+};
+
+const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
.counter_index_get = mlxsw_sp_act_counter_index_get,
.counter_index_put = mlxsw_sp_act_counter_index_put,
.mirror_add = mlxsw_sp_act_mirror_add,
.mirror_del = mlxsw_sp_act_mirror_del,
+ .dummy_first_set = true,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ mlxsw_sp->afa_ops, mlxsw_sp);
return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
}
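
Editor's note: mlxsw_sp1_act_kvdl_set_add() and mlxsw_sp2_act_kvdl_set_add() above are thin wrappers that call one shared helper with the extra boolean fixed per ASIC, so the common logic stays in one place while each ops table still exposes the callback signature the core expects. A trivial sketch of the same wrapper idea (names made up):

#include <stdbool.h>
#include <stdio.h>

/* Shared helper carrying the real logic plus a variant-specific flag. */
static int set_add(const char *actions, bool is_first, bool variant_flag)
{
	printf("add '%s' first=%d variant_flag=%d\n",
	       actions, is_first, variant_flag);
	return 0;
}

/* Per-variant wrappers fix the flag so both match the core callback type. */
static int v1_set_add(const char *actions, bool is_first)
{
	return set_add(actions, is_first, false);
}

static int v2_set_add(const char *actions, bool is_first)
{
	return set_add(actions, is_first, true);
}

int main(void)
{
	v1_set_add("drop", true);
	v2_set_add("drop", true);
	return 0;
}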
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
new file mode 100644
index 000000000000..aa8927cee376
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -0,0 +1,316 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+ MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+ MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
+ MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+ MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+ MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+ MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+ MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+ MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
+static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+ .blocks = mlxsw_sp1_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+ .encode_block = mlxsw_sp1_afk_encode_block,
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
+ MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+};
+
+static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36
+
+/* A block in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32|
+ * +-----------------------------------------------------------------------------------------------+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ */
+MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK);
+
+/* The key / mask block layout in Spectrum-2 is of the following form:
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | | | | | | | | | | | | | | | | | block11_high |
+ * +-----------------------------------------------------------------------------------------------+
+ * | block11_low | block10_high |
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * ...
+ */
+
+struct mlxsw_sp2_afk_block_layout {
+ unsigned short offset;
+ struct mlxsw_item item;
+};
+
+#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \
+ { \
+ .offset = _offset, \
+ { \
+ .shift = _shift, \
+ .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \
+ .name = #_block, \
+ } \
+ } \
+
+static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8),
+ MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
+};
+
+static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
+ char *output)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+ const struct mlxsw_sp2_afk_block_layout *block_layout;
+
+ if (WARN_ON(block_index < 0 ||
+ block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout)))
+ return;
+
+ block_layout = &mlxsw_sp2_afk_blocks_layout[block_index];
+ __mlxsw_item_set64(output + block_layout->offset,
+ &block_layout->item, 0, block_value);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
+ .blocks = mlxsw_sp2_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+};
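
Editor's note: on Spectrum-2 each key block contributes a 36-bit value, and mlxsw_sp2_afk_encode_block() drops that value into the key/mask buffer at a per-block byte offset and bit shift taken from mlxsw_sp2_afk_blocks_layout (block0 at 0x30 shift 0, block1 at 0x2C shift 4, and so on), so blocks are packed back to back at 36-bit granularity and most of them do not start on byte boundaries. Below is a simplified, endianness-agnostic sketch of packing 36-bit fields at an (offset, shift) position; it models the idea only and is not the exact __mlxsw_item_set64() encoding:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_LEN		64	/* illustrative buffer size, not the real one */
#define BLOCK_BITS	36

/* Write a 36-bit value so that its bit 0 lands at (byte_offset, bit_shift). */
static void put_block(uint8_t *buf, unsigned int byte_offset,
		      unsigned int bit_shift, uint64_t value)
{
	value &= (1ULL << BLOCK_BITS) - 1;	/* keep only 36 bits */

	for (unsigned int i = 0; i < BLOCK_BITS; i++) {
		unsigned int bit = bit_shift + i;
		unsigned int byte = byte_offset + bit / 8;

		if (value & (1ULL << i))
			buf[byte] |= 1u << (bit % 8);
	}
}

int main(void)
{
	uint8_t key[KEY_LEN];

	memset(key, 0, sizeof(key));
	/* Offsets/shifts echo the first two entries of the layout table. */
	put_block(key, 0x30, 0, 0xFFFFFFFFFULL);	/* "block0" */
	put_block(key, 0x2C, 4, 0x123456789ULL);	/* "block1" */

	for (int i = 0x2C; i < 0x38; i++)
		printf("byte 0x%02x = 0x%02x\n", i, key[i]);
	return 0;
}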
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
deleted file mode 100644
index fb8031828454..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-
-#include "core_acl_flex_keys.h"
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
- MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
- MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
- MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
- MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
- MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
- MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
- MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
- MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
- MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
-};
-
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
- MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
-};
-
-static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
- MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
- MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
- MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
- MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
- MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
- MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
- MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
- MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
- MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
- MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
- MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
-};
-
-#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index ad1b548e3cac..e06d7d9e5b7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,25 +39,25 @@
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
- unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
- unsigned int max_groups;
- unsigned int max_group_size;
-};
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->priv_size;
+}
-static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
@@ -88,21 +88,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
return 0;
+err_tcam_init:
+ kfree(tcam->used_groups);
err_alloc_used_groups:
kfree(tcam->used_regions);
return err;
}
-static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
{
- struct mlxsw_sp_acl_tcam *tcam = priv;
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ ops->fini(mlxsw_sp, tcam->priv);
kfree(tcam->used_groups);
kfree(tcam->used_regions);
}
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority)
+{
+ u64 max_priority;
+
+ if (!fillup_priority) {
+ *priority = 0;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+ if (rulei->priority > max_priority)
+ return -EINVAL;
+
+ /* Unlike in TC, in HW, higher number means higher priority. */
+ *priority = max_priority - rulei->priority;
+ return 0;
+}
+
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
@@ -159,35 +191,21 @@ struct mlxsw_sp_acl_tcam_group {
unsigned int patterns_count;
};
-struct mlxsw_sp_acl_tcam_region {
- struct list_head list; /* Member of a TCAM group */
- struct list_head chunk_list; /* List of chunks under this region */
- struct parman *parman;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_acl_tcam_group *group;
- u16 id; /* ACL ID and region ID - they are same */
- char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
- struct mlxsw_afk_key_info *key_info;
- struct {
- struct parman_prio parman_prio;
- struct parman_item parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- } catchall;
-};
-
struct mlxsw_sp_acl_tcam_chunk {
struct list_head list; /* Member of a TCAM region */
struct rhash_head ht_node; /* Member of a chunk HT */
unsigned int priority; /* Priority within the region and group */
- struct parman_prio parman_prio;
struct mlxsw_sp_acl_tcam_group *group;
struct mlxsw_sp_acl_tcam_region *region;
unsigned int ref_count;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
};
struct mlxsw_sp_acl_tcam_entry {
- struct parman_item parman_item;
struct mlxsw_sp_acl_tcam_chunk *chunk;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
};
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
@@ -441,9 +459,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
memcpy(out, elusage, sizeof(*out));
}
-#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
-
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
@@ -455,6 +470,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+ region->key_type,
MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
region->id, region->tcam_region_info);
encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
@@ -477,24 +493,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
{
char ptar_pl[MLXSW_REG_PTAR_LEN];
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+ mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+ region->key_type, 0, region->id,
region->tcam_region_info);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
static int
-mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 new_size)
-{
- char ptar_pl[MLXSW_REG_PTAR_LEN];
-
- mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
- new_size, region->id, region->tcam_region_info);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
-}
-
-static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
@@ -516,193 +521,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
-static int
-mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- char *act_set;
- char *mask;
- char *key;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
- mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
-
- /* Only the first action set belongs here, the rest is in KVD */
- act_set = mlxsw_afa_block_first_set(rulei->act_block);
- mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-
- mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
- region->tcam_region_info, offset);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static int
-mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- unsigned int offset,
- bool *activity)
-{
- char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- int err;
-
- mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
- region->tcam_region_info, offset);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
- if (err)
- return err;
- *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
- return 0;
-}
-
-#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
-
-static int
-mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei;
- int err;
-
- parman_prio_init(region->parman, parman_prio,
- MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- err = parman_item_add(region->parman, parman_prio, parman_item);
- if (err)
- goto err_parman_item_add;
-
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
- if (IS_ERR(rulei)) {
- err = PTR_ERR(rulei);
- goto err_rulei_create;
- }
-
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
- if (WARN_ON(err))
- goto err_rulei_act_continue;
-
- err = mlxsw_sp_acl_rulei_commit(rulei);
- if (err)
- goto err_rulei_commit;
-
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- parman_item->index, rulei);
- region->catchall.rulei = rulei;
- if (err)
- goto err_rule_insert;
-
- return 0;
-
-err_rule_insert:
-err_rulei_commit:
-err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
-err_rulei_create:
- parman_item_remove(region->parman, parman_prio, parman_item);
-err_parman_item_add:
- parman_prio_fini(parman_prio);
- return err;
-}
-
-static void
-mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region)
-{
- struct parman_prio *parman_prio = &region->catchall.parman_prio;
- struct parman_item *parman_item = &region->catchall.parman_item;
- struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
-
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- parman_item->index);
- mlxsw_sp_acl_rulei_destroy(rulei);
- parman_item_remove(region->parman, parman_prio, parman_item);
- parman_prio_fini(parman_prio);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_region *region,
- u16 src_offset, u16 dst_offset, u16 size)
-{
- char prcr_pl[MLXSW_REG_PRCR_LEN];
-
- mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
- region->tcam_region_info, src_offset,
- region->tcam_region_info, dst_offset, size);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
-}
-
-static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
-}
-
-static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_acl_tcam_region *region = priv;
- struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-
- mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
- from_index, to_index, count);
-}
-
-static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_acl_tcam_region_parman_resize,
- .move = mlxsw_sp_acl_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_tcam_region *region;
int err;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
if (!region)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&region->chunk_list);
region->mlxsw_sp = mlxsw_sp;
- region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
- region);
- if (!region->parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
-
region->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(region->key_info)) {
err = PTR_ERR(region->key_info);
@@ -713,6 +547,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_region_id_get;
+ err = ops->region_associate(mlxsw_sp, region);
+ if (err)
+ goto err_tcam_region_associate;
+
+ region->key_type = ops->key_type;
err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
if (err)
goto err_tcam_region_alloc;
@@ -721,23 +560,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_tcam_region_enable;
- err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+ err = ops->region_init(mlxsw_sp, region->priv, region);
if (err)
- goto err_tcam_region_catchall_add;
+ goto err_tcam_region_init;
return region;
-err_tcam_region_catchall_add:
+err_tcam_region_init:
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
+err_tcam_region_associate:
mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
- parman_destroy(region->parman);
-err_parman_create:
kfree(region);
return ERR_PTR(err);
}
@@ -746,12 +584,13 @@ static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region)
{
- mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->region_fini(mlxsw_sp, region->priv);
mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
mlxsw_afk_key_info_put(region->key_info);
- parman_destroy(region->parman);
kfree(region);
}
@@ -826,13 +665,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
unsigned int priority,
struct mlxsw_afk_element_usage *elusage)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
int err;
if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
return ERR_PTR(-EINVAL);
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
if (!chunk)
return ERR_PTR(-ENOMEM);
chunk->priority = priority;
@@ -844,7 +684,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_chunk_assoc;
- parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+ ops->chunk_init(chunk->region->priv, chunk->priv, priority);
err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
@@ -854,7 +694,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
return chunk;
err_rhashtable_insert:
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
kfree(chunk);
@@ -865,11 +705,12 @@ static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *chunk)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_group *group = chunk->group;
rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
mlxsw_sp_acl_tcam_chunk_ht_params);
- parman_prio_fini(&chunk->parman_prio);
+ ops->chunk_fini(chunk->priv);
mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
kfree(chunk);
}
@@ -903,11 +744,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}
+static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ return ops->entry_priv_size;
+}
+
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
struct mlxsw_sp_acl_tcam_entry *entry,
struct mlxsw_sp_acl_rule_info *rulei)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk;
struct mlxsw_sp_acl_tcam_region *region;
int err;
@@ -918,24 +767,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(chunk);
region = chunk->region;
- err = parman_item_add(region->parman, &chunk->parman_prio,
- &entry->parman_item);
- if (err)
- goto err_parman_item_add;
- err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
- entry->parman_item.index,
- rulei);
+ err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
if (err)
- goto err_rule_insert;
+ goto err_entry_add;
entry->chunk = chunk;
return 0;
-err_rule_insert:
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
-err_parman_item_add:
+err_entry_add:
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
return err;
}
@@ -943,13 +784,11 @@ err_parman_item_add:
static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
- entry->parman_item.index);
- parman_item_remove(region->parman, &chunk->parman_prio,
- &entry->parman_item);
+ ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
@@ -958,22 +797,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
- return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
- entry->parman_item.index,
- activity);
+ return ops->entry_activity_get(mlxsw_sp, region->priv,
+ entry->priv, activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
- MLXSW_AFK_ELEMENT_DMAC,
- MLXSW_AFK_ELEMENT_SMAC,
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP4,
- MLXSW_AFK_ELEMENT_DST_IP4,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
@@ -987,10 +828,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
MLXSW_AFK_ELEMENT_ETHERTYPE,
MLXSW_AFK_ELEMENT_IP_PROTO,
- MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- MLXSW_AFK_ELEMENT_DST_IP6_HI,
- MLXSW_AFK_ELEMENT_DST_IP6_LO,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
MLXSW_AFK_ELEMENT_DST_L4_PORT,
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
@@ -1019,10 +864,10 @@ struct mlxsw_sp_acl_tcam_flower_rule {
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
- void *priv, void *ruleset_priv)
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- struct mlxsw_sp_acl_tcam *tcam = priv;
return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
mlxsw_sp_acl_tcam_patterns,
@@ -1070,6 +915,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}
+static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv, void *rule_priv,
@@ -1107,7 +958,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
- .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+ .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
@@ -1118,7 +969,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
-static const struct mlxsw_sp_acl_profile_ops *
+const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_acl_profile profile)
{
@@ -1131,10 +982,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
return NULL;
return ops;
}
-
-const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
- .priv_size = sizeof(struct mlxsw_sp_acl_tcam),
- .init = mlxsw_sp_acl_tcam_init,
- .fini = mlxsw_sp_acl_tcam_fini,
- .profile_ops = mlxsw_sp_acl_tcam_profile_ops,
-};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 000000000000..68551da2221d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+ unsigned long *used_regions; /* bit array */
+ unsigned int max_regions;
+ unsigned long *used_groups; /* bit array */
+ unsigned int max_groups;
+ unsigned int max_group_size;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+ size_t ruleset_priv_size;
+ int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv);
+ void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ u16 (*ruleset_group_id)(void *ruleset_priv);
+ size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+ int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
+ void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+ bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+struct mlxsw_sp_acl_tcam_group;
+
+struct mlxsw_sp_acl_tcam_region {
+ struct list_head list; /* Member of a TCAM group */
+ struct list_head chunk_list; /* List of chunks under this region */
+ struct mlxsw_sp_acl_tcam_group *group;
+ enum mlxsw_reg_ptar_key_type key_type;
+ u16 id; /* ACL ID and region ID - they are the same */
+ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+ struct mlxsw_afk_key_info *key_info;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+ struct parman *parman;
+ struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+ struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+ struct parman_item parman_item;
+};
+
+int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_tcam_region *region);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+ return centry->parman_item.index;
+}
+
+int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp,
+ u16 region_id);
+int mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_region *region);
+
+#endif
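
For illustration, a minimal standalone C sketch of the ops-table pattern the new header encodes: rule_priv_size becomes a callback so each TCAM flavour (the parman-backed C-TCAM today, the A-TCAM whose hooks are declared above) can size its own private data, which the core appends after the generic rule wrapper. All names below are hypothetical stand-ins compiled in userspace, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>

	struct flavour_ops {
		size_t (*entry_priv_size)(void);
		int (*entry_add)(void *entry_priv, int prio);
	};

	struct rule {
		int prio;
		unsigned long priv[];	/* flavour-specific part, sized at run time */
	};

	struct ctcam_entry {		/* stand-in for the parman-backed entry state */
		unsigned int index;
	};

	static size_t ctcam_entry_priv_size(void)
	{
		return sizeof(struct ctcam_entry);
	}

	static int ctcam_entry_add(void *entry_priv, int prio)
	{
		struct ctcam_entry *centry = entry_priv;

		centry->index = (unsigned int)prio;	/* pretend allocation */
		return 0;
	}

	static const struct flavour_ops ctcam_ops = {
		.entry_priv_size = ctcam_entry_priv_size,
		.entry_add = ctcam_entry_add,
	};

	static struct rule *rule_create(const struct flavour_ops *ops, int prio)
	{
		/* Allocate wrapper and flavour priv in one block, as the driver does. */
		struct rule *rule = calloc(1, sizeof(*rule) + ops->entry_priv_size());

		if (!rule)
			return NULL;
		rule->prio = prio;
		ops->entry_add(rule->priv, prio);
		return rule;
	}

	int main(void)
	{
		struct rule *rule = rule_create(&ctcam_ops, 7);

		if (rule)
			printf("rule priv sized to %zu bytes\n",
			       ctcam_ops.entry_priv_size());
		free(rule);
		return 0;
	}
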
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 89dbf569dff5..201761a3539e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -144,10 +144,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
FLOW_DISSECTOR_KEY_IPV4_ADDRS,
f->mask);
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
- ntohl(key->src), ntohl(mask->src));
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
- ntohl(key->dst), ntohl(mask->dst));
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->src,
+ (char *) &mask->src, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->dst,
+ (char *) &mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
@@ -161,24 +163,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_IPV6_ADDRS,
f->mask);
- size_t addr_half_size = sizeof(key->src) / 2;
-
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
- &key->src.s6_addr[0],
- &mask->src.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
- &key->src.s6_addr[addr_half_size],
- &mask->src.s6_addr[addr_half_size],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
- &key->dst.s6_addr[0],
- &mask->dst.s6_addr[0],
- addr_half_size);
- mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
- &key->dst.s6_addr[addr_half_size],
- &mask->dst.s6_addr[addr_half_size],
- addr_half_size);
+
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->src.s6_addr[0x0],
+ &mask->src.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->src.s6_addr[0x4],
+ &mask->src.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->src.s6_addr[0x8],
+ &mask->src.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->src.s6_addr[0xC],
+ &mask->src.s6_addr[0xC], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->dst.s6_addr[0x0],
+ &mask->dst.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->dst.s6_addr[0x4],
+ &mask->dst.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->dst.s6_addr[0x8],
+ &mask->dst.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->dst.s6_addr[0xC],
+ &mask->dst.s6_addr[0xC], 4);
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -340,13 +349,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
f->mask);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_DMAC,
- key->dst, mask->dst,
- sizeof(key->dst));
+ MLXSW_AFK_ELEMENT_DMAC_32_47,
+ key->dst, mask->dst, 2);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_DMAC_0_31,
+ key->dst + 2, mask->dst + 2, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei,
+ MLXSW_AFK_ELEMENT_SMAC_32_47,
+ key->src, mask->src, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
- MLXSW_AFK_ELEMENT_SMAC,
- key->src, mask->src,
- sizeof(key->src));
+ MLXSW_AFK_ELEMENT_SMAC_0_31,
+ key->src + 2, mask->src + 2, 4);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
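
For illustration, a standalone sketch of how the flower classifier now feeds 48-bit MACs and 128-bit IPv6 addresses into the new 32-bit key blocks: the first two MAC bytes carry bits 32-47 and the remaining four carry bits 0-31, while an IPv6 address is consumed four bytes at a time from s6_addr[0x0], [0x4], [0x8] and [0xC]. A userspace stand-in with hypothetical names:

	#include <stdio.h>
	#include <stdint.h>

	/* Assemble four bytes into a big-endian 32-bit block. */
	static uint32_t be32_from(const uint8_t *p)
	{
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] << 8) | p[3];
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint8_t ip6[16] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
				    0, 0, 0, 0, 0, 0, 0, 0x01 };
		int i;

		/* MAC_32_47 takes the two leading bytes, MAC_0_31 the remaining four. */
		printf("MAC_32_47 = 0x%04x\n", (mac[0] << 8) | mac[1]);
		printf("MAC_0_31  = 0x%08x\n", (unsigned int)be32_from(&mac[2]));

		/* IP_96_127 down to IP_0_31, four bytes per block, high bits first. */
		for (i = 0; i < 4; i++)
			printf("IP_%d_%d = 0x%08x\n", (3 - i) * 32, (3 - i) * 32 + 31,
			       (unsigned int)be32_from(&ip6[i * 4]));
		return 0;
	}
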
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index fe4327f547d2..fd557585514d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,422 +33,74 @@
*/
#include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
#include "spectrum.h"
-#define MLXSW_SP_KVDL_SINGLE_BASE 0
-#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
-#define MLXSW_SP_KVDL_SINGLE_END \
- (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
-
-#define MLXSW_SP_KVDL_CHUNKS_BASE \
- (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
-#define MLXSW_SP_KVDL_CHUNKS_END \
- (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
- (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
- (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
-#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
-
-struct mlxsw_sp_kvdl_part_info {
- unsigned int part_index;
- unsigned int start_index;
- unsigned int end_index;
- unsigned int alloc_size;
- enum mlxsw_sp_resource_id resource_id;
-};
-
-enum mlxsw_sp_kvdl_part_id {
- MLXSW_SP_KVDL_PART_ID_SINGLE,
- MLXSW_SP_KVDL_PART_ID_CHUNKS,
- MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
-};
-
-#define MLXSW_SP_KVDL_PART_INFO(id) \
-[MLXSW_SP_KVDL_PART_ID_##id] = { \
- .start_index = MLXSW_SP_KVDL_##id##_BASE, \
- .end_index = MLXSW_SP_KVDL_##id##_END, \
- .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
- .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
-}
-
-static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
- MLXSW_SP_KVDL_PART_INFO(SINGLE),
- MLXSW_SP_KVDL_PART_INFO(CHUNKS),
- MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
-};
-
-#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
-
-struct mlxsw_sp_kvdl_part {
- struct mlxsw_sp_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
-};
-
struct mlxsw_sp_kvdl {
- struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+ unsigned long priv[0];
+ /* priv has to be always the last item */
};
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
- unsigned int alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (alloc_size <= part->info.alloc_size &&
- (!min_part ||
- part->info.alloc_size <= min_part->info.alloc_size))
- min_part = part;
- }
-
- return min_part ?: ERR_PTR(-ENOBUFS);
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
-{
- struct mlxsw_sp_kvdl_part *part;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- part = kvdl->parts[i];
- if (kvdl_index >= part->info.start_index &&
- kvdl_index <= part->info.end_index)
- return part;
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static u32
-mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
- unsigned int entry_index)
-{
- return info->start_index + entry_index * info->alloc_size;
-}
-
-static unsigned int
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
- u32 kvdl_index)
-{
- return (kvdl_index - info->start_index) / info->alloc_size;
-}
-
-static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
- u32 *p_kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index, nr_entries;
-
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
- entry_index = find_first_zero_bit(part->usage, nr_entries);
- if (entry_index == nr_entries)
- return -ENOBUFS;
- __set_bit(entry_index, part->usage);
-
- *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
- u32 kvdl_index)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int entry_index;
-
- entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
- __clear_bit(entry_index, part->usage);
-}
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
- u32 *p_entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- /* Find partition with smallest allocation size satisfying the
- * requested size.
- */
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
-}
-
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
- if (IS_ERR(part))
- return;
- mlxsw_sp_kvdl_part_free(part, entry_index);
-}
-
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
- unsigned int entry_count,
- unsigned int *p_alloc_size)
-{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
- if (IS_ERR(part))
- return PTR_ERR(part);
-
- *p_alloc_size = part->info.alloc_size;
-
- return 0;
-}
-
-static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
- struct mlxsw_sp_kvdl_part *part_prev,
- unsigned int size)
-{
-
- if (!part_prev) {
- part->info.end_index = size - 1;
- } else {
- part->info.start_index = part_prev->info.end_index + 1;
- part->info.end_index = part->info.start_index + size - 1;
- }
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_kvdl_part_info *info,
- struct mlxsw_sp_kvdl_part *part_prev)
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl_part *part;
- bool need_update = true;
- unsigned int nr_entries;
- size_t usage_size;
- u64 resource_size;
+ const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+ struct mlxsw_sp_kvdl *kvdl;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
- if (err) {
- need_update = false;
- resource_size = info->end_index - info->start_index + 1;
- }
-
- nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
- if (!part)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&part->info, info, sizeof(part->info));
-
- if (need_update)
- mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
- return part;
-}
-
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
-{
- kfree(part);
-}
-
-static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- const struct mlxsw_sp_kvdl_part_info *info;
- struct mlxsw_sp_kvdl_part *part_prev = NULL;
- int err, i;
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+ GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ kvdl->kvdl_ops = kvdl_ops;
+ mlxsw_sp->kvdl = kvdl;
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
- info = &mlxsw_sp_kvdl_parts_info[i];
- kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
- part_prev);
- if (IS_ERR(kvdl->parts[i])) {
- err = PTR_ERR(kvdl->parts[i]);
- goto err_kvdl_part_init;
- }
- part_prev = kvdl->parts[i];
- }
+ err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+ if (err)
+ goto err_init;
return 0;
-err_kvdl_part_init:
- for (i--; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
+err_init:
+ kfree(kvdl);
return err;
}
-static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
-}
-
-static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
-{
- const struct mlxsw_sp_kvdl_part_info *info = &part->info;
- unsigned int nr_entries;
- int bit = -1;
- u64 occ = 0;
-
- nr_entries = (info->end_index -
- info->start_index + 1) /
- info->alloc_size;
- while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
- < nr_entries)
- occ += info->alloc_size;
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
- occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
-
- return occ;
-}
-
-static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
-{
- const struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_kvdl_part *part;
- part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
- return mlxsw_sp_kvdl_part_occ(part);
+ kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ kfree(kvdl);
}
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, u32 *p_entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- static struct devlink_resource_size_params size_params;
- u32 kvdl_max_size;
- int err;
-
- kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
- MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
-
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- if (err)
- return err;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
- MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
- DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
- return err;
+ return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
}
-int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count, int entry_index)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- struct mlxsw_sp_kvdl *kvdl;
- int err;
-
- kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
- if (!kvdl)
- return -ENOMEM;
- mlxsw_sp->kvdl = kvdl;
-
- err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
- if (err)
- goto err_kvdl_parts_init;
-
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp_kvdl_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp_kvdl_single_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp_kvdl_chunks_occ_get,
- mlxsw_sp);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp_kvdl_large_chunks_occ_get,
- mlxsw_sp);
-
- return 0;
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-err_kvdl_parts_init:
- kfree(mlxsw_sp->kvdl);
- return err;
+ kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+ entry_count, entry_index);
}
-void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_kvdl_entry_type type,
+ unsigned int entry_count,
+ unsigned int *p_alloc_count)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
- mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
- kfree(mlxsw_sp->kvdl);
+ return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_alloc_count);
}
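
For illustration, a minimal standalone sketch of why the KVDL free path now takes the entry type and count alongside the index: with per-ASIC allocators hidden behind the ops struct, the core no longer derives the allocation size from fixed index ranges, so the caller has to say how many entries of which type it is returning. Hypothetical names, toy first-fit pools:

	#include <stdio.h>
	#include <string.h>

	#define KVDL_POOL_SIZE 64

	enum kvdl_entry_type {
		KVDL_ENTRY_TYPE_ADJ,
		KVDL_ENTRY_TYPE_MCRIGR,
		KVDL_ENTRY_TYPE_MAX,
	};

	/* One tiny first-fit bitmap pool per entry type. */
	static unsigned char used[KVDL_ENTRY_TYPE_MAX][KVDL_POOL_SIZE];

	static int kvdl_alloc(enum kvdl_entry_type type, unsigned int count,
			      unsigned int *p_index)
	{
		unsigned int i, j;

		for (i = 0; i + count <= KVDL_POOL_SIZE; i++) {
			for (j = 0; j < count; j++)
				if (used[type][i + j])
					break;
			if (j == count) {
				memset(&used[type][i], 1, count);
				*p_index = i;
				return 0;
			}
		}
		return -1;	/* -ENOBUFS in the real driver */
	}

	static void kvdl_free(enum kvdl_entry_type type, unsigned int count,
			      unsigned int index)
	{
		/* Without the count, a per-type pool could not tell how much to return. */
		memset(&used[type][index], 0, count);
	}

	int main(void)
	{
		unsigned int adj_index;

		if (!kvdl_alloc(KVDL_ENTRY_TYPE_ADJ, 4, &adj_index)) {
			printf("ECMP adjacency block of 4 at index %u\n", adj_index);
			kvdl_free(KVDL_ENTRY_TYPE_ADJ, 4, adj_index);
		}
		return 0;
	}
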
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index a82539609d49..98dcaf78365c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
cancel_delayed_work_sync(&mr->stats_update_dw);
- mr->mr_ops->fini(mr->priv);
+ mr->mr_ops->fini(mlxsw_sp, mr->priv);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 7c864a86811d..c92fa90dca31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action {
MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
};
-enum mlxsw_sp_mr_route_prio {
- MLXSW_SP_MR_ROUTE_PRIO_SG,
- MLXSW_SP_MR_ROUTE_PRIO_STARG,
- MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
- __MLXSW_SP_MR_ROUTE_PRIO_MAX
-};
-
-#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
-
struct mlxsw_sp_mr_route_key {
int vrid;
enum mlxsw_sp_l3proto proto;
@@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops {
u16 erif_index);
void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv);
- void (*fini)(void *priv);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
struct mlxsw_sp_mr;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4f4c0d311883..e9c9f1f45b9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -1,7 +1,8 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,7 +36,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/parman.h>
#include "spectrum_mr_tcam.h"
#include "reg.h"
@@ -43,15 +43,8 @@
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
-struct mlxsw_sp_mr_tcam_region {
- struct mlxsw_sp *mlxsw_sp;
- enum mlxsw_reg_rtar_key_type rtar_key_type;
- struct parman *parman;
- struct parman_prio *parman_prios;
-};
-
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+ void *priv;
};
/* This struct maps to one RIGR2 register entry */
@@ -84,8 +77,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
INIT_LIST_HEAD(&erif_list->erif_sublists);
}
-#define MLXSW_SP_KVDL_RIGR2_SIZE 1
-
static struct mlxsw_sp_mr_erif_sublist *
mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list)
@@ -96,8 +87,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
if (!erif_sublist)
return ERR_PTR(-ENOMEM);
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
- &erif_sublist->rigr2_kvdl_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, &erif_sublist->rigr2_kvdl_index);
if (err) {
kfree(erif_sublist);
return ERR_PTR(err);
@@ -112,7 +103,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_erif_sublist *erif_sublist)
{
list_del(&erif_sublist->list);
- mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ 1, erif_sublist->rigr2_kvdl_index);
kfree(erif_sublist);
}
@@ -221,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route {
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
u32 counter_index;
- struct parman_item parman_item;
- struct parman_prio *parman_prio;
enum mlxsw_sp_mr_route_action action;
struct mlxsw_sp_mr_route_key key;
u16 irif_index;
u16 min_mtu;
+ void *priv;
};
static struct mlxsw_afa_block *
@@ -297,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
mlxsw_afa_block_destroy(afa_block);
}
-static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
- struct parman_item *parman_item,
- struct mlxsw_sp_mr_route_key *key,
- struct mlxsw_afa_block *afa_block)
-{
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- ntohl(key->group.addr4),
- ntohl(key->group_mask.addr4),
- ntohl(key->source.addr4),
- ntohl(key->source_mask.addr4),
- mlxsw_afa_block_first_set(afa_block));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
- key->vrid,
- MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
- key->group.addr6,
- key->group_mask.addr6,
- key->source.addr6,
- key->source_mask.addr6,
- mlxsw_afa_block_first_set(afa_block));
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
-static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
- struct mlxsw_sp_mr_route_key *key,
- struct parman_item *parman_item)
-{
- struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
- char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
- switch (key->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, 0, 0, 0, 0, NULL);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
- vrid, 0, 0, zero_addr, zero_addr,
- zero_addr, zero_addr, NULL);
- break;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
static int
mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_erif_list *erif_list,
@@ -370,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static struct mlxsw_sp_mr_tcam_region *
-mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
- enum mlxsw_sp_l3proto proto)
-{
- return &mr_tcam->tcam_regions[proto];
-}
-
-static int
-mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route,
- enum mlxsw_sp_mr_route_prio prio)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
- int err;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
- err = parman_item_add(tcam_region->parman,
- &tcam_region->parman_prios[prio],
- &route->parman_item);
- if (err)
- return err;
-
- route->parman_prio = &tcam_region->parman_prios[prio];
- return 0;
-}
-
-static void
-mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
- struct mlxsw_sp_mr_tcam_route *route)
-{
- struct mlxsw_sp_mr_tcam_region *tcam_region;
-
- tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
- route->key.proto);
-
- parman_item_remove(tcam_region->parman,
- route->parman_prio, &route->parman_item);
-}
-
static int
mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_params *route_params)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
int err;
@@ -448,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_create;
}
- /* Allocate place in the TCAM */
- err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
- route_params->prio);
- if (err)
- goto err_parman_item_add;
+ route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+ if (!route->priv) {
+ err = -ENOMEM;
+ goto err_route_priv_alloc;
+ }
/* Write the route to the TCAM */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, route->afa_block);
+ err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+ &route->key, route->afa_block,
+ route_params->prio);
if (err)
- goto err_route_replace;
+ goto err_route_create;
return 0;
-err_route_replace:
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
-err_parman_item_add:
+err_route_create:
+ kfree(route->priv);
+err_route_priv_alloc:
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
err_afa_block_create:
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -476,12 +375,12 @@ err_counter_alloc:
static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
void *priv, void *route_priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->key, &route->parman_item);
- mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+ kfree(route->priv);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
@@ -502,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv,
enum mlxsw_sp_mr_route_action route_action)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -516,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -534,6 +433,7 @@ err:
static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 min_mtu)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_afa_block *afa_block;
int err;
@@ -549,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(afa_block);
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err;
@@ -596,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
void *route_priv, u16 erif_index)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_erif_sublist *erif_sublist;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
@@ -630,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -653,6 +552,7 @@ static int
mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
struct mlxsw_sp_mr_route_info *route_info)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam_route *route = route_priv;
struct mlxsw_sp_mr_tcam_erif_list erif_list;
struct mlxsw_afa_block *afa_block;
@@ -677,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
}
/* Update the TCAM route entry */
- err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
- &route->key, afa_block);
+ err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
if (err)
goto err_route_write;
@@ -699,167 +598,36 @@ err_erif_populate:
return err;
}
-#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
-
-static int
-mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
- mr_tcam_region->rtar_key_type,
- MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void
-mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
-
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
- mr_tcam_region->rtar_key_type, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
- unsigned long new_count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rtar_pl[MLXSW_REG_RTAR_LEN];
- u64 max_tcam_rules;
-
- max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
- if (new_count > max_tcam_rules)
- return -EINVAL;
- mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
- mr_tcam_region->rtar_key_type, new_count);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
- unsigned long from_index,
- unsigned long to_index,
- unsigned long count)
-{
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
- struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
- char rrcr_pl[MLXSW_REG_RRCR_LEN];
-
- mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
- from_index, count,
- mr_tcam_region->rtar_key_type, to_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
-}
-
-static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
- .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
- .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
- .resize = mlxsw_sp_mr_tcam_region_parman_resize,
- .move = mlxsw_sp_mr_tcam_region_parman_move,
- .algo = PARMAN_ALGO_TYPE_LSORT,
-};
-
-static int
-mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
- enum mlxsw_reg_rtar_key_type rtar_key_type)
-{
- struct parman_prio *parman_prios;
- struct parman *parman;
- int err;
- int i;
-
- mr_tcam_region->rtar_key_type = rtar_key_type;
- mr_tcam_region->mlxsw_sp = mlxsw_sp;
-
- err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
- if (err)
- return err;
-
- parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
- mr_tcam_region);
- if (!parman) {
- err = -ENOMEM;
- goto err_parman_create;
- }
- mr_tcam_region->parman = parman;
-
- parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
- sizeof(*parman_prios), GFP_KERNEL);
- if (!parman_prios) {
- err = -ENOMEM;
- goto err_parman_prios_alloc;
- }
- mr_tcam_region->parman_prios = parman_prios;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_init(mr_tcam_region->parman,
- &mr_tcam_region->parman_prios[i], i);
- return 0;
-
-err_parman_prios_alloc:
- parman_destroy(parman);
-err_parman_create:
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
- return err;
-}
-
-static void
-mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
- int i;
-
- for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
- parman_prio_fini(&mr_tcam_region->parman_prios[i]);
- kfree(mr_tcam_region->parman_prios);
- parman_destroy(mr_tcam_region->parman);
- mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-}
-
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- u32 rtar_key;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
return -EIO;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV4],
- rtar_key);
- if (err)
- return err;
+ mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+ if (!mr_tcam->priv)
+ return -ENOMEM;
- rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
- err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &region[MLXSW_SP_L3_PROTO_IPV6],
- rtar_key);
+ err = ops->init(mlxsw_sp, mr_tcam->priv);
if (err)
- goto err_ipv6_region_init;
-
+ goto err_init;
return 0;
-err_ipv6_region_init:
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+err_init:
+ kfree(mr_tcam->priv);
return err;
}
-static void mlxsw_sp_mr_tcam_fini(void *priv)
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
- struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
- mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ ops->fini(mr_tcam->priv);
+ kfree(mr_tcam->priv);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 77b2adb29341..8d67f0123699 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -48,6 +48,7 @@
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
+#include <linux/if_macvlan.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -60,6 +61,7 @@
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
+#include <net/switchdev.h>
#include "spectrum.h"
#include "core.h"
@@ -163,7 +165,9 @@ struct mlxsw_sp_rif_ops {
const struct mlxsw_sp_rif_params *params);
int (*configure)(struct mlxsw_sp_rif *rif);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
- struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
+ void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -342,10 +346,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
-
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
struct mlxsw_sp_prefix_usage {
@@ -1109,7 +1109,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index;
int err;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, &tunnel_index);
if (err)
return err;
@@ -1125,7 +1126,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
- mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ 1, fib_entry->decap.tunnel_index);
}
static struct mlxsw_sp_fib_node *
@@ -3165,8 +3167,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
* by the device and make sure the request can be satisfied.
*/
mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
- err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
- &alloc_size);
+ err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ *p_adj_grp_size, &alloc_size);
if (err)
return err;
/* It is possible the allocation results in more allocated
@@ -3278,7 +3281,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* No valid allocation size available. */
goto set_trap;
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
* trap and let everything flow through kernel.
@@ -3313,7 +3317,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
old_adj_index, old_ecmp_size);
- mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ old_ecmp_size, old_adj_index);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
goto set_trap;
@@ -3335,7 +3340,8 @@ set_trap:
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
if (old_adj_index_valid)
- mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+ nh_grp->ecmp_size, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -5967,7 +5973,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct mlxsw_sp_rif *
+struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6024,6 +6030,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+ /* macvlans do not have a RIF, but rather piggyback on the
+ * RIF of their lower device.
+ */
+ if (netif_is_macvlan(dev) && addr_list_empty)
+ return true;
+
if (rif && addr_list_empty &&
!netif_is_l3_slave(rif->dev))
return true;
@@ -6125,6 +6137,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
+{
+ return rif->fid;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6162,7 +6179,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
if (ops->fid_get) {
- fid = ops->fid_get(rif);
+ fid = ops->fid_get(rif, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -6267,7 +6284,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
}
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif);
+ fid = rif->ops->fid_get(rif, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -6432,6 +6449,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+ u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+ u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+ return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+ const u8 *mac, bool adding)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u8 vrrp_id = adding ? mac[5] : 0;
+ int err;
+
+ if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+ !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+ return 0;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+ mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+ else
+ mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+ int err;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ if (!rif) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+ macvlan_dev->dev_addr, true);
+ if (err)
+ goto err_rif_vrrp_add;
+
+ /* Make sure the bridge driver does not have this MAC pointing at
+ * some other port.
+ */
+ if (rif->ops->fdb_del)
+ rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+ return 0;
+
+err_rif_vrrp_add:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ return err;
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+ /* If we do not have a RIF, then we already took care of
+ * removing the macvlan's MAC during RIF deletion.
+ */
+ if (!rif)
+ return;
+ mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+ false);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -6444,6 +6578,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ else if (netif_is_macvlan(dev))
+ return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
else
return 0;
}
@@ -6684,7 +6820,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
int err = 0;
- if (!mlxsw_sp)
+ /* We do not create a RIF for a macvlan, but only use it to
+ * direct more MAC addresses to the router.
+ */
+ if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
switch (event) {
@@ -6705,6 +6844,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+{
+ struct mlxsw_sp_rif *rif = data;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+ if (!netif_is_macvlan_port(rif->dev))
+ return 0;
+
+ netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(rif->dev,
+ __mlxsw_sp_rif_macvlan_flush, rif);
+}
+
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
@@ -6771,11 +6931,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
@@ -6857,6 +7019,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6865,19 +7028,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
- u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+ u16 vid;
+ int err;
+
+ if (is_vlan_dev(rif->dev)) {
+ vid = vlan_dev_vlan_id(rif->dev);
+ } else {
+ err = br_vlan_get_pvid(rif->dev, &vid);
+ if (err < 0 || !vid) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+ return ERR_PTR(-EINVAL);
+ }
+ }
return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *br_dev;
+ struct net_device *dev;
+
+ br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ dev = br_fdb_find_port(br_dev, mac, vid);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = vid;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_vlan_configure,
.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
@@ -6929,6 +7122,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_rif_set(fid, NULL);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6937,17 +7131,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.type = MLXSW_SP_RIF_TYPE_FID,
.rif_size = sizeof(struct mlxsw_sp_rif),
.configure = mlxsw_sp_rif_fid_configure,
.deconfigure = mlxsw_sp_rif_fid_deconfigure,
.fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
static struct mlxsw_sp_rif_ipip_lb *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index a01edcf56797..52e25695625c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -75,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3d187d88cc7c..e42d640cddab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
+#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>
@@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
struct list_head *iter;
netdev_for_each_lower_dev(lag_dev, dev, iter)
- if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+ if (netif_carrier_ok(dev) &&
+ net_lag_port_dev_txable(dev) &&
+ mlxsw_sp_port_dev_check(dev))
return dev;
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index eea5666a86b2..da94e1eb9e16 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1135,6 +1135,39 @@ err_port_vlan_set:
return err;
}
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ u16 pvid;
+ u16 vid;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (!rif)
+ return 0;
+ fid = mlxsw_sp_rif_fid(rif);
+ pvid = mlxsw_sp_fid_8021q_vid(fid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
@@ -1146,8 +1179,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- if (netif_is_bridge_master(orig_dev))
- return -EOPNOTSUPP;
+ if (netif_is_bridge_master(orig_dev)) {
+ int err = 0;
+
+ if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
+ br_vlan_enabled(orig_dev) &&
+ switchdev_trans_ph_prepare(trans))
+ err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+ orig_dev, vlan);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
if (switchdev_trans_ph_prepare(trans))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 399e9d6993f7..eb437f59640d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -63,6 +63,7 @@ enum {
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
@@ -78,6 +79,7 @@ enum {
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+ MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index b72d1bd11296..ebbdfb908745 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port)
*/
static void port_set_link_speed(struct ksz_port *port)
{
- struct ksz_port_info *info;
struct ksz_hw *hw = port->hw;
u16 data;
u16 cfg;
@@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port)
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
- info = &hw->port_info[p];
-
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dd947e4dd3ce..e1747a490066 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -828,7 +828,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
}
if (!mac_address_valid)
- random_ether_addr(adapter->mac_address);
+ eth_random_addr(adapter->mac_address);
lan743x_mac_set_address(adapter, adapter->mac_address);
ether_addr_copy(netdev->dev_addr, adapter->mac_address);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 776a8a9be8e3..1a4f2bb48ead 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
return 0;
}
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+ /* Select the VID to configure */
+ ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+ ANA_TABLES_VLANTIDX);
+ /* Set the vlan port members mask and issue a write command */
+ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+ ANA_TABLES_VLANACCESS_CMD_WRITE,
+ ANA_TABLES_VLANACCESS);
+
+ return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static void ocelot_vlan_mode(struct ocelot_port *port,
+ netdev_features_t features)
+{
+ struct ocelot *ocelot = port->ocelot;
+ u8 p = port->chip_port;
+ u32 val;
+
+ /* Filtering */
+ val = ocelot_read(ocelot, ANA_VLANMASK);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ val |= BIT(p);
+ else
+ val &= ~BIT(p);
+ ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static void ocelot_vlan_port_apply(struct ocelot *ocelot,
+ struct ocelot_port *port)
+{
+ u32 val;
+
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
+ /* Default VLAN to classify untagged frames to (may be zero) */
+ val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
+ if (port->vlan_aware)
+ val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_VLAN_CFG_VLAN_VID_M |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+ ANA_PORT_VLAN_CFG, port->chip_port);
+
+ /* Drop frames with multicast source address */
+ val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
+ if (port->vlan_aware && !port->vid)
+ /* If port is vlan-aware and tagged, drop untagged and priority
+ * tagged frames.
+ */
+ val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
+ val = REW_TAG_CFG_TAG_TPID_CFG(0);
+
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG(1);
+ else
+ /* Tag all frames */
+ val |= REW_TAG_CFG_TAG_CFG(3);
+ }
+ ocelot_rmw_gix(ocelot, val,
+ REW_TAG_CFG_TAG_TPID_CFG_M |
+ REW_TAG_CFG_TAG_CFG_M,
+ REW_TAG_CFG, port->chip_port);
+
+ /* Set default VLAN and tag type to 8021Q. */
+ val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID(port->vid);
+ ocelot_rmw_gix(ocelot, val,
+ REW_PORT_VLAN_CFG_PORT_TPID_M |
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port->chip_port);
+}
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* Add the port MAC address with the right VLAN information */
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+ ENTRYTYPE_LOCKED);
+
+ /* Make the port a member of the VLAN */
+ ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ /* Untagged egress VLAN classification */
+ if (untagged)
+ port->vid = vid;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+ int ret;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with the VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ /* Remove the port MAC address with the right VLAN information */
+ ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+ /* Stop the port from being a member of the vlan */
+ ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
+ ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ if (ret)
+ return ret;
+
+ /* Ingress */
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ /* Egress */
+ if (port->vid == vid)
+ port->vid = 0;
+
+ ocelot_vlan_port_apply(ocelot, port);
+
+ return 0;
+}
+
static void ocelot_vlan_init(struct ocelot *ocelot)
{
+ u16 port, vid;
+
/* Clear VLAN table, by default all ports are members of all VLANs */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
ANA_TABLES_VLANACCESS);
ocelot_vlant_wait_for_completion(ocelot);
+
+ /* Configure the port VLAN memberships */
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ ocelot->vlan_mask[vid] = 0;
+ ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+ }
+
+ /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded, but
+ * we can't rely on that since the module may not be loaded.
+ */
+ ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+ ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+ /* Configure the CPU port to be VLAN aware */
+ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
+
+ /* Set vlan ingress filter mask to all ports but the CPU port by
+ * default.
+ */
+ ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+ ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+ }
}
/* Watermark encode
@@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ if (!vid) {
+ if (!port->vlan_aware)
+ /* If the bridge is not VLAN aware and no VID was
+ * provided, set it to pvid to ensure the MAC entry
+ * matches incoming untagged packets
+ */
+ vid = port->pvid;
+ else
+ /* If the bridge is VLAN aware, a VID must be provided, as
+ * otherwise the learnt entry wouldn't match any frame.
+ */
+ return -EINVAL;
+ }
+
return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
ENTRYTYPE_NORMAL);
}
@@ -690,6 +883,30 @@ end:
return ret;
}
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_add(dev, vid, false, true);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
+{
+ return ocelot_vlan_vid_del(dev, vid);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ocelot_vlan_mode(port, features);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_fdb_add = ocelot_fdb_add,
.ndo_fdb_del = ocelot_fdb_del,
.ndo_fdb_dump = ocelot_fdb_dump,
+ .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
+ .ndo_set_features = ocelot_set_features,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ocelot_port_attr_get(struct net_device *dev,
@@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev,
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ocelot_port->vlan_aware = attr->u.vlan_filtering;
+ ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+ break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
break;
@@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev,
return err;
}
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_add(dev, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_PVID,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ int ret;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ret = ocelot_vlan_vid_del(dev, vid);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
const unsigned char *addr,
u16 vid)
@@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
bool new = false;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
@@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
u16 vid = mdb->vid;
if (!vid)
- vid = 1;
+ vid = port->pvid;
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
@@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_obj_add_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
trans);
@@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev,
int ret = 0;
switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ ret = ocelot_port_vlan_del_vlan(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
@@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
+
+ /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
+ ocelot_port->vlan_aware = 0;
+ ocelot_port->pvid = 0;
+ ocelot_port->vid = 0;
+}
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+ int i, port, lag;
+
+ /* Reset destination and aggregation PGIDS */
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++)
+ ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+ ANA_PGID_PGID, i);
+
+ /* Now, set PGIDs for each LAG */
+ for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ unsigned long bond_mask;
+ int aggr_count = 0;
+ u8 aggr_idx[16];
+
+ bond_mask = ocelot->lags[lag];
+ if (!bond_mask)
+ continue;
+
+ for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ // Destination mask
+ ocelot_write_rix(ocelot, bond_mask,
+ ANA_PGID_PGID, port);
+ aggr_idx[aggr_count] = port;
+ aggr_count++;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; i++) {
+ u32 ac;
+
+ ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+ ac &= ~bond_mask;
+ ac |= BIT(aggr_idx[i % aggr_count]);
+ ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+ }
+ }
+}
+
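As a rough illustration of the aggregation PGID layout programmed above (a userspace sketch, not driver code; the bond membership is hypothetical), each aggregation PGID keeps exactly one LAG member set, matching the aggr_idx[16] array, so the hardware's aggregation code effectively round-robins flows across the bond:

#include <stdio.h>

/* Hypothetical two-port bond on switch ports 1 and 2. */
static const unsigned int aggr_idx[] = { 1, 2 };
#define AGGR_COUNT 2

int main(void)
{
	unsigned int ac;

	/* For each aggregation code, show which member carries the flow,
	 * mirroring the BIT(aggr_idx[i % aggr_count]) assignment above.
	 */
	for (ac = 0; ac < 16; ac++)
		printf("aggr code %2u -> port %u\n", ac, aggr_idx[ac % AGGR_COUNT]);
	return 0;
}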
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+ unsigned long bond_mask = ocelot->lags[lag];
+ unsigned int p;
+
+ for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+ u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+ /* Use the LAG port as the logical port for port p */
+ ocelot_write_gix(ocelot, port_cfg |
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG, p);
+ }
+}
+
+static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ int lag, lp;
+ struct net_device *ndev;
+ u32 bond_mask = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond, ndev) {
+ struct ocelot_port *port = netdev_priv(ndev);
+
+ bond_mask |= BIT(port->chip_port);
+ }
+ rcu_read_unlock();
+
+ lp = __ffs(bond_mask);
+
+ /* If the new port is the lowest one, use it as the logical port from
+ * now on
+ */
+ if (p == lp) {
+ lag = p;
+ ocelot->lags[p] = bond_mask;
+ bond_mask &= ~BIT(p);
+ if (bond_mask) {
+ lp = __ffs(bond_mask);
+ ocelot->lags[lp] = 0;
+ }
+ } else {
+ lag = lp;
+ ocelot->lags[lp] |= BIT(p);
+ }
+
+ ocelot_setup_lag(ocelot, lag);
+ ocelot_set_aggr_pgids(ocelot);
+
+ return 0;
+}
+
+static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+ struct net_device *bond)
+{
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int p = ocelot_port->chip_port;
+ u32 port_cfg;
+ int i;
+
+ /* Remove port from any lag */
+ for (i = 0; i < ocelot->num_phys_ports; i++)
+ ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+
+ /* if it was the logical port of the lag, move the lag config to the
+ * next port
+ */
+ if (ocelot->lags[p]) {
+ int n = __ffs(ocelot->lags[p]);
+
+ ocelot->lags[n] = ocelot->lags[p];
+ ocelot->lags[p] = 0;
+
+ ocelot_setup_lag(ocelot, n);
+ }
+
+ port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
+ ANA_PORT_PORT_CFG, p);
+
+ ocelot_set_aggr_pgids(ocelot);
}
/* Checks if the net_device instance given to us originate from our driver. */
@@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
else
ocelot_port_bridge_leave(ocelot_port,
info->upper_dev);
+
+ ocelot_vlan_port_apply(ocelot_port->ocelot,
+ ocelot_port);
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = ocelot_port_lag_join(ocelot_port,
+ info->upper_dev);
+ else
+ ocelot_port_lag_leave(ocelot_port,
+ info->upper_dev);
}
break;
default:
@@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
int ret = 0;
+ if (event == NETDEV_PRECHANGEUPPER &&
+ netif_is_lag_master(info->upper_dev)) {
+ struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+ struct netlink_ext_ack *extack;
+
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ extack = netdev_notifier_info_to_extack(&info->info);
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+ ret = -EINVAL;
+ goto notify;
+ }
+ }
+
if (netif_is_lag_master(dev)) {
struct net_device *slave;
struct list_head *iter;
@@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
@@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
goto err_register_netdev;
}
+ /* Basic L2 initialization */
+ ocelot_vlan_port_apply(ocelot, ocelot_port);
+
return 0;
err_register_netdev:
@@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot)
int i, cpu = ocelot->num_phys_ports;
char queue_name[32];
+ ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(u32), GFP_KERNEL);
+ if (!ocelot->lags)
+ return -ENOMEM;
+
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 097bd12a10d4..616bec30dfa3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -493,7 +493,7 @@ struct ocelot {
u8 num_cpu_ports;
struct ocelot_port **ports;
- u16 lags[16];
+ u32 *lags;
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 18df7d934e81..26bb3b18f3be 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+ info->tag_type = (ifh[3] & BIT(16)) >> 16;
info->vid = ifh[3] & GENMASK(11, 0);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 8a92088df0d7..1d9e36835404 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -34,10 +34,11 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include "main.h"
@@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst,
}
static void
+__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
+ enum mul_type type, enum mul_step step, u16 breg, bool swap,
+ bool wr_both, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_MUL_BASE |
+ FIELD_PREP(OP_MUL_A_SRC, areg) |
+ FIELD_PREP(OP_MUL_B_SRC, breg) |
+ FIELD_PREP(OP_MUL_STEP, step) |
+ FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
+ FIELD_PREP(OP_MUL_SW, swap) |
+ FIELD_PREP(OP_MUL_TYPE, type) |
+ FIELD_PREP(OP_MUL_WR_AB, wr_both) |
+ FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
+ enum mul_step step, swreg rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ u16 areg;
+ int err;
+
+ if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
+ nfp_prog->error = -EINVAL;
+ return;
+ }
+
+ if (step == MUL_LAST || step == MUL_LAST_2) {
+ /* For the LAST and LAST_2 steps, the left source is used as
+ * the destination.
+ */
+ err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
+ areg = reg.dst;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
+ areg = reg.areg;
+ }
+
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
+}
+
+static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
bool zero, bool swap, bool wr_both,
@@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
xfer_num = round_up(len, 4) / 4;
if (src_40bit_addr)
- addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
&off);
/* Setup PREV_ALU fields to override memory read length. */
@@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static void
+wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg, bool gen_high_half)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
+ if (gen_high_half)
+ emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
+ reg_none());
+ else
+ wrp_immed(nfp_prog, dst_hi, 0);
+}
+
+static void
+wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+ swreg rreg)
+{
+ emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
+ emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
+ emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
+}
+
+static int
+wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ bool gen_high_half, bool ropnd_from_reg)
+{
+ swreg multiplier, multiplicand, dst_hi, dst_lo;
+ const struct bpf_insn *insn = &meta->insn;
+ u32 lopnd_max, ropnd_max;
+ u8 dst_reg;
+
+ dst_reg = insn->dst_reg;
+ multiplicand = reg_a(dst_reg * 2);
+ dst_hi = reg_both(dst_reg * 2 + 1);
+ dst_lo = reg_both(dst_reg * 2);
+ lopnd_max = meta->umax_dst;
+ if (ropnd_from_reg) {
+ multiplier = reg_b(insn->src_reg * 2);
+ ropnd_max = meta->umax_src;
+ } else {
+ u32 imm = insn->imm;
+
+ multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ ropnd_max = imm;
+ }
+ if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
+ wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
+ gen_high_half);
+ else
+ wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
+
+ return 0;
+}
+
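To make the step sequences above concrete, here is a userspace sketch (illustration only, not the NFP microcode) of a 32x32 -> 64 multiply built from 16-bit partial products, which is conceptually what the MUL_STEP_1..MUL_STEP_4 / MUL_LAST sequence accumulates; it also shows why the shorter 16x16 path is enough when the verifier proves both operands fit in u16, since even 0xffff * 0xffff still fits in 32 bits:

#include <assert.h>
#include <stdint.h>

/* Illustration only: 32x32 -> 64 multiply from 16-bit partial products. */
static uint64_t mul_32x32(uint32_t a, uint32_t b)
{
	uint64_t lo = (uint64_t)(a & 0xffff) * (b & 0xffff);
	uint64_t m1 = (uint64_t)(a >> 16) * (b & 0xffff);
	uint64_t m2 = (uint64_t)(a & 0xffff) * (b >> 16);
	uint64_t hi = (uint64_t)(a >> 16) * (b >> 16);

	return lo + (m1 << 16) + (m2 << 16) + (hi << 32);
}

int main(void)
{
	assert(mul_32x32(0xdeadbeef, 0x12345678) ==
	       (uint64_t)0xdeadbeef * 0x12345678);
	/* Largest u16 operands: the product still fits in 32 bits, so the
	 * 16x16 step sequence never needs a high half.
	 */
	assert((uint64_t)0xffff * 0xffff == 0xfffe0001ULL);
	return 0;
}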
+static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
+{
+ swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
+ struct reciprocal_value_adv rvalue;
+ u8 pre_shift, exp;
+ swreg magic;
+
+ if (imm > U32_MAX) {
+ wrp_immed(nfp_prog, dst_both, 0);
+ return 0;
+ }
+
+ /* NOTE: "reciprocal_value_adv" doesn't support a divisor larger than
+ * (1u << 31), so we JIT a separate NFP sequence for that case. The
+ * quotient then equals the result of the unsigned comparison
+ * "dst >= imm", which can be calculated using the following NFP
+ * sequence:
+ *
+ * alu[--, dst, -, imm]
+ * immed[imm, 0]
+ * alu[dst, imm, +carry, 0]
+ *
+ */
+ if (imm > 1U << 31) {
+ swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+
+ emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
+ wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
+ emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
+ reg_imm(0));
+ return 0;
+ }
+
+ rvalue = reciprocal_value_adv(imm, 32);
+ exp = rvalue.exp;
+ if (rvalue.is_wide_m && !(imm & 1)) {
+ pre_shift = fls(imm & -imm) - 1;
+ rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
+ } else {
+ pre_shift = 0;
+ }
+ magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
+ if (imm == 1U << exp) {
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, exp);
+ } else if (rvalue.is_wide_m) {
+ wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
+ magic, true);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, 1);
+ emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
+ imm_b(nfp_prog));
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+ SHF_SC_R_SHF, rvalue.sh - 1);
+ } else {
+ if (pre_shift)
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, pre_shift);
+ wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
+ emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+ dst_b, SHF_SC_R_SHF, rvalue.sh);
+ }
+
+ return 0;
+}
+
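A userspace sketch of the reciprocal-multiplication idea used by wrp_div_imm() (illustration only: find_magic() below is a simplified stand-in for reciprocal_value_adv() and only covers the easy case of a 32-bit magic with no pre-shift, i.e. quotient = (n * m) >> (32 + sh)). The separate imm > 2^31 path above works because the quotient can then only be 0 or 1, which is exactly the unsigned comparison n >= imm:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for reciprocal_value_adv(): find a 32-bit magic m and
 * shift sh such that for every 32-bit n, n / d == ((uint64_t)n * m >> 32) >> sh.
 * Divisors that only admit a 33-bit magic are the "is_wide_m" case, which the
 * JIT handles with the extra subtract/shift/add steps.
 */
static int find_magic(uint32_t d, uint32_t *m, unsigned int *sh)
{
	unsigned int s;

	for (s = 0; s < 32; s++) {
		uint64_t pow = (uint64_t)1 << (32 + s);
		uint64_t mag = (pow + d - 1) / d;	/* ceil(2^(32+s) / d) */

		/* Granlund-Montgomery condition for exactness on all u32 n */
		if (mag <= UINT32_MAX && mag * d <= pow + ((uint64_t)1 << s)) {
			*m = (uint32_t)mag;
			*sh = s;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t d = 1000000, m, n;
	unsigned int sh;

	assert(find_magic(d, &m, &sh) == 0);
	for (n = 0; n < UINT32_MAX - 12345; n += 12345)
		assert(n / d == ((uint32_t)(((uint64_t)n * m) >> 32) >> sh));
	printf("d=%u -> m=%#x, sh=%u\n", d, m, sh);
	return 0;
}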
static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
@@ -1684,6 +1866,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, true);
+}
+
+static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, true, false);
+}
+
+static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
+}
+
+static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* NOTE: the verifier hook has already rejected cases where the
+ * verifier cannot prove the source operand is a constant.
+ */
+ return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
+}
+
static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1772,8 +1979,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shl_imm64(nfp_prog, dst, umin);
@@ -1881,8 +2088,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __shr_imm64(nfp_prog, dst, umin);
@@ -1995,8 +2202,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u8 dst, src;
dst = insn->dst_reg * 2;
- umin = meta->umin;
- umax = meta->umax;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
if (umin == umax)
return __ashr_imm64(nfp_prog, dst, umin);
@@ -2097,6 +2304,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, true);
+}
+
+static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_mul(nfp_prog, meta, false, false);
+}
+
+static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_reg64(nfp_prog, meta);
+}
+
+static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return div_imm64(nfp_prog, meta);
+}
+
static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
u8 dst = meta->insn.dst_reg * 2;
@@ -2848,6 +3075,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
+ [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
+ [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
+ [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
[BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
@@ -2867,6 +3098,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
+ [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
+ [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
+ [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
@@ -3299,7 +3534,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
return false;
- if (ld_meta->ptr.type != PTR_TO_PACKET)
+ if (ld_meta->ptr.type != PTR_TO_PACKET &&
+ ld_meta->ptr.type != PTR_TO_MAP_VALUE)
return false;
if (st_meta->ptr.type != PTR_TO_PACKET)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 40216d56dddc..b95b94d008cf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
- int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
- xdp_running = running && nn->dp.bpf_offload_xdp;
+ xdp_running = running && nn->xdp_hw.prog;
if (!prog && !xdp_running)
return 0;
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running, extack);
- /* Stop offload if replace not possible */
- if (ret)
- return ret;
-
- nn->dp.bpf_offload_xdp = !!prog;
- return ret;
+ return nfp_net_bpf_offload(nn, prog, running, extack);
}
static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
@@ -209,7 +202,7 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_bpf_setup_tc_block_cb,
- nn, nn);
+ nn, nn, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_bpf_setup_tc_block_cb,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 654fe7823e5e..9845c1a2d4c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -263,8 +263,10 @@ struct nfp_bpf_reg_state {
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
- * @umin: copy of core verifier umin_value.
- * @umax: copy of core verifier umax_value.
+ * @umin_src: copy of core verifier umin_value for src operand.
+ * @umax_src: copy of core verifier umax_value for src operand.
+ * @umin_dst: copy of core verifier umin_value for dst operand.
+ * @umax_dst: copy of core verifier umax_value for dst operand.
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -300,12 +302,15 @@ struct nfp_insn_meta {
struct bpf_reg_state arg1;
struct nfp_bpf_reg_state arg2;
};
- /* We are interested in range info for some operands,
- * for example, the shift amount.
+ /* We are interested in range info for operands of ALU
+ * operations, for example the shift amount, the multiplicand
+ * and the multiplier.
*/
struct {
- u64 umin;
- u64 umax;
+ u64 umin_src;
+ u64 umax_src;
+ u64 umin_dst;
+ u64 umax_dst;
};
};
unsigned int off;
@@ -339,6 +344,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
+{
+ return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
+}
+
static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
@@ -384,23 +394,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
-static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
- u8 code = meta->insn.code;
- bool is_alu, is_shift;
- u8 opclass, opcode;
-
- opclass = BPF_CLASS(code);
- is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
- if (!is_alu)
- return false;
-
- opcode = BPF_OP(code);
- is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
- if (!is_shift)
- return false;
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
+}
- return BPF_SRC(code) == BPF_X;
+static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
+{
+ return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
/**
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 7eae4c0266f8..78f44c4d95b4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -190,8 +190,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
meta->insn = prog[i];
meta->n = i;
- if (is_mbpf_indir_shift(meta))
- meta->umin = U64_MAX;
+ if (is_mbpf_alu(meta)) {
+ meta->umin_src = U64_MAX;
+ meta->umin_dst = U64_MAX;
+ }
list_add_tail(&meta->l, &nfp_prog->insns);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 4bfeba7b21b2..49ba0d645d36 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -517,6 +517,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
}
static int
+nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *sreg =
+ cur_regs(env) + meta->insn.src_reg;
+ const struct bpf_reg_state *dreg =
+ cur_regs(env) + meta->insn.dst_reg;
+
+ meta->umin_src = min(meta->umin_src, sreg->umin_value);
+ meta->umax_src = max(meta->umax_src, sreg->umax_value);
+ meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
+ meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+
+ /* NFP supports u16 and u32 multiplication.
+ *
+ * For ALU64, if either operand is beyond u32's value range, we reject
+ * it. Note that if the source operand is BPF_K, we need to check the
+ * "imm" field directly, and we reject it if it is negative: for ALU64,
+ * "imm" (of s32 type) would be sign extended to s64, which the NFP
+ * multiply doesn't support.
+ *
+ * For ALU32, it is fine for "imm" to be negative, because the result
+ * is 32 bits and the low half of the product is the same for signed
+ * and unsigned multiplication, so we still get the correct result.
+ */
+ if (is_mbpf_mul(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "multiplier is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
+ pr_vlog(env, "multiplicand is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_class(meta) == BPF_ALU64 &&
+ mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+
+ /* NFP doesn't have divide instructions; we support divide by constant
+ * through reciprocal multiplication. Given that NFP supports
+ * multiplication no wider than u32, we require the divisor and the
+ * dividend to be no bigger than that as well.
+ *
+ * Also, eBPF doesn't support signed division and enforces this at the
+ * C language level by failing compilation. However, the LLVM assembler
+ * doesn't enforce it, so a negative constant can leak in as a BPF_K
+ * operand through assembly code; we reject such cases as well.
+ */
+ if (is_mbpf_div(meta)) {
+ if (meta->umax_dst > U32_MAX) {
+ pr_vlog(env, "dividend is not within u32 value range\n");
+ return -EINVAL;
+ }
+ if (mbpf_src(meta) == BPF_X) {
+ if (meta->umin_src != meta->umax_src) {
+ pr_vlog(env, "divisor is not constant\n");
+ return -EINVAL;
+ }
+ if (meta->umax_src > U32_MAX) {
+ pr_vlog(env, "divisor is not within u32 value range\n");
+ return -EINVAL;
+ }
+ }
+ if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+ pr_vlog(env, "divide by negative constant is not supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
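A rough sketch of the kind of BPF C these checks accept or reject (illustration only; the section name and program logic are hypothetical, not part of the driver or its selftests):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int example_div(struct xdp_md *ctx)
{
	__u32 bytes = ctx->data_end - ctx->data;	/* 32-bit, so within u32 */
	__u32 divisor = bytes & 0xff;
	__u32 cells;

	/* Accepted: BPF_DIV with a positive BPF_K divisor and a 32-bit
	 * dividend, so wrp_div_imm() can JIT a reciprocal multiply.
	 */
	cells = bytes / 64;

	/* Rejected by the checks above: 'divisor' is not a constant the
	 * verifier can pin down, so there is no value to build a
	 * reciprocal for.
	 */
	/* cells = bytes / divisor; */
	(void)divisor;

	return cells ? XDP_PASS : XDP_DROP;
}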
+static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
@@ -551,13 +627,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);
- if (is_mbpf_indir_shift(meta)) {
- const struct bpf_reg_state *sreg =
- cur_regs(env) + meta->insn.src_reg;
-
- meta->umin = min(meta->umin, sreg->umin_value);
- meta->umax = max(meta->umax, sreg->umax_value);
- }
+ if (is_mbpf_alu(meta))
+ return nfp_bpf_check_alu(nfp_prog, meta, env);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 4a6d2db75071..e56b815a8dc6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -34,6 +34,7 @@
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
+#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
@@ -44,6 +45,8 @@
#include "main.h"
#include "../nfp_net_repr.h"
+#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY)
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -235,9 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
u32 tmp_set_ip_tun_type_index = 0;
+ struct flowi4 flow = {};
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ struct rtable *rt;
struct net *net;
+ int err;
if (ip_tun->options_len)
return -EOPNOTSUPP;
@@ -254,7 +260,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+
+ /* Do a route lookup to determine the TTL; if it fails, use the default.
+ * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV, so it
+ * must be defined here.
+ */
+ flow.daddr = ip_tun->key.u.ipv4.dst;
+ flow.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(net, &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (!err) {
+ set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
+ ip_rt_put(rt);
+ } else {
+ set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ }
+
+ set_tun->tos = ip_tun->key.tos;
+
+ if (!(ip_tun->key.tun_flags & TUNNEL_KEY) ||
+ ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+ return -EOPNOTSUPP;
+ set_tun->tun_flags = ip_tun->key.tun_flags;
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -398,8 +425,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
return 0;
}
+static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
+{
+ switch (ip_proto) {
+ case 0:
+ /* The filter doesn't force a protocol match; both TCP and UDP
+ * will be updated if encountered
+ */
+ return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
+ case IPPROTO_TCP:
+ return TCA_CSUM_UPDATE_FLAG_TCP;
+ case IPPROTO_UDP:
+ return TCA_CSUM_UPDATE_FLAG_UDP;
+ default:
+ /* All other protocols will be ignored by FW */
+ return 0;
+ }
+}
+
static int
-nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+ char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ip4_addrs set_ip_addr;
@@ -409,6 +455,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
int idx, nkeys, err;
size_t act_size;
u32 offset, cmd;
+ u8 ip_proto = 0;
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
@@ -451,6 +498,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
return err;
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *basic;
+
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ ip_proto = basic->ip_proto;
+ }
+
if (set_eth.head.len_lw) {
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
@@ -459,6 +515,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -471,18 +531,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_tport.head.len_lw) {
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
return 0;
@@ -493,12 +565,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_output *output;
int err, prelag_size;
+ /* If csum_updated has not been reset by now, it means HW will
+ * incorrectly update csums when they are not requested.
+ */
+ if (*csum_updated)
+ return -EOPNOTSUPP;
+
if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -529,10 +607,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt)
+ int *out_cnt, u32 *csum_updated)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -545,14 +624,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
} else if (is_tcf_mirred_egress_redirect(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
} else if (is_tcf_mirred_egress_mirror(a)) {
err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt);
+ out_cnt, csum_updated);
if (err)
return err;
@@ -602,8 +681,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
/* Tunnel decap is handled by default so accept action. */
return 0;
} else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ a_len, csum_updated))
+ return -EOPNOTSUPP;
+ } else if (is_tcf_csum(a)) {
+ /* csum action requests recalc of something we have not fixed */
+ if (tcf_csum_update_flags(a) & ~*csum_updated)
return -EOPNOTSUPP;
+ /* If we will correctly fix the csum, remove it from the csum update
+ * list, which is later used to check support.
+ */
+ *csum_updated &= ~tcf_csum_update_flags(a);
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
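A short walk-through of the csum_updated bookkeeping introduced above (userspace illustration only): a pedit of the IPv4 addresses on a TCP flow records the checksums the hardware will fix, a following csum action may only ask for checksums in that set, and any bit still set when the output action is reached makes the offload bail out with -EOPNOTSUPP:

#include <stdio.h>
#include <linux/tc_act/tc_csum.h>

int main(void)
{
	unsigned int csum_updated = 0;

	/* set_ip_addr pedit on a TCP flow: HW fixes IPv4 header and TCP csums */
	csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP;

	/* csum action requesting a TCP recalc: already covered, clear it */
	csum_updated &= ~TCA_CSUM_UPDATE_FLAG_TCP;

	/* a csum action asking for e.g. SCTP would hit the
	 * "flags & ~csum_updated" test above and be rejected
	 */

	/* the leftover IPV4HDR bit means the filter never asked for that
	 * recalc, so nfp_flower_output_action() refuses the offload
	 */
	printf("leftover csum flags: %#x\n", csum_updated);
	return 0;
}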
@@ -620,6 +708,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
int act_len, act_cnt, err, tun_out_cnt, out_cnt;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
+ u32 csum_updated = 0;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -632,8 +721,9 @@ int nfp_flower_compile_action(struct nfp_app *app,
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
- &tun_type, &tun_out_cnt, &out_cnt);
+ err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ netdev, &tun_type, &tun_out_cnt,
+ &out_cnt, &csum_updated);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4a7f3510a296..15f1eacd76b6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun {
__be16 reserved;
__be64 tun_id __packed;
__be32 tun_type_index;
- __be16 reserved2;
+ __be16 tun_flags;
u8 ttl;
- u8 reserved3;
+ u8 tos;
__be32 extra[2];
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 0c4c957717ea..bf10598f66ae 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
if (lag_upper_info &&
lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
(lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
- (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
- lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+ (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
+ lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
can_offload = false;
nfp_flower_cmsg_warn(priv->app,
"Unable to offload tx_type %u hash %u\n",
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index bbe5764d26cb..ef2114d13387 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -73,7 +73,7 @@ struct nfp_app;
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
- struct timespec64 *last_used;
+ ktime_t *last_used;
u8 init_unallocated;
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 93fb809f50d1..c098730544b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
struct nfp_flower_priv *priv = app->priv;
struct circ_buf *ring;
- struct timespec64 now;
ring = &priv->mask_ids.mask_id_free_list;
/* Checking if buffer is full. */
@@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
(NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
- getnstimeofday64(&now);
- priv->mask_ids.last_used[mask_id] = now;
+ priv->mask_ids.last_used[mask_id] = ktime_get();
return 0;
}
@@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
struct nfp_flower_priv *priv = app->priv;
- struct timespec64 delta, now;
+ ktime_t reuse_timeout;
struct circ_buf *ring;
u8 temp_id, freed_id;
@@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
- getnstimeofday64(&now);
- delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+ reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
+ NFP_FL_MASK_REUSE_TIME_NS);
- if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+ if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
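The conversion above replaces the timespec64 delta arithmetic with a ktime comparison; a minimal kernel-style sketch of the equivalent predicate (not driver code, the helper name is made up):

#include <linux/ktime.h>
#include <linux/types.h>

/* True once NFP_FL_MASK_REUSE_TIME_NS have elapsed since the mask id was
 * freed, i.e. the inverse of the ktime_before() check above.
 */
static bool example_mask_id_reusable(ktime_t last_used)
{
	return ktime_after(ktime_get(),
			   ktime_add_ns(last_used, NFP_FL_MASK_REUSE_TIME_NS));
}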
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 525057bee0ed..43b9bf12b174 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -584,9 +584,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return nfp_flower_del_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_STATS:
return nfp_flower_get_stats(app, netdev, flower, egress);
+ default:
+ return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
@@ -638,7 +638,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
nfp_flower_setup_tc_block_cb,
- repr, repr);
+ repr, repr, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block,
nfp_flower_setup_tc_block_cb,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index f6677bc9875a..cdc4e065f6f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -426,4 +426,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
}
+enum mul_type {
+ MUL_TYPE_START = 0x00,
+ MUL_TYPE_STEP_24x8 = 0x01,
+ MUL_TYPE_STEP_16x16 = 0x02,
+ MUL_TYPE_STEP_32x32 = 0x03,
+};
+
+enum mul_step {
+ MUL_STEP_1 = 0x00,
+ MUL_STEP_NONE = MUL_STEP_1,
+ MUL_STEP_2 = 0x01,
+ MUL_STEP_3 = 0x02,
+ MUL_STEP_4 = 0x03,
+ MUL_LAST = 0x04,
+ MUL_LAST_2 = 0x05,
+};
+
+#define OP_MUL_BASE 0x0f800000000ULL
+#define OP_MUL_A_SRC 0x000000003ffULL
+#define OP_MUL_B_SRC 0x000000ffc00ULL
+#define OP_MUL_STEP 0x00000700000ULL
+#define OP_MUL_DST_AB 0x00000800000ULL
+#define OP_MUL_SW 0x00040000000ULL
+#define OP_MUL_TYPE 0x00180000000ULL
+#define OP_MUL_WR_AB 0x20000000000ULL
+#define OP_MUL_SRC_LMEXTN 0x40000000000ULL
+#define OP_MUL_DST_LMEXTN 0x80000000000ULL
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 2a71a9ffd095..8970ec981e11 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -485,7 +485,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_xdp: Offloaded BPF program is XDP
* @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
* @rx_dma_off: Offset at which DMA packets (for XDP headroom)
@@ -510,7 +509,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
u8 rx_dma_dir;
@@ -553,8 +551,8 @@ struct nfp_net_dp {
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
- * @xdp_flags: Flags with which XDP prog was loaded
- * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes)
+ * @xdp: Information about the driver XDP program
+ * @xdp_hw: Information about the HW XDP program
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
@@ -610,8 +608,8 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
- u32 xdp_flags;
- struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
unsigned int max_tx_rings;
unsigned int max_rx_rings;
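The per-netdev XDP state above switches to the generic struct xdp_attachment_info from <net/xdp.h>, one instance for the driver datapath and one for the hardware-offloaded program. A minimal sketch of how an ndo_bpf handler typically uses the matching helpers; reconfigure_hw() and sketch_xdp_setup() are hypothetical stand-ins for the device-specific code:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical device-specific reconfiguration hook. */
static int reconfigure_hw(struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
        return 0;
}

static int sketch_xdp_setup(struct xdp_attachment_info *info,
                            struct netdev_bpf *bpf)
{
        int err;

        /* Reject requests whose attach flags conflict with the current program. */
        if (!xdp_attachment_flags_ok(info, bpf))
                return -EBUSY;

        err = reconfigure_hw(bpf->prog, bpf->extack);
        if (err)
                return err;

        /* Takes over the prog reference and remembers the attach flags. */
        xdp_attachment_setup(info, bpf);
        return 0;
}

The XDP_QUERY_PROG and XDP_QUERY_PROG_HW cases then reduce to xdp_attachment_query() on the respective state, which fills in prog_id and prog_flags from what was stored at setup time.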
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d4c27f849f9b..a712e83c3f0f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -945,11 +945,12 @@ err_free:
/**
* nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring: TX ring structure
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
*
* Return: Number of completed TX descriptors
*/
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
+static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
/* check for last gather fragment */
if (fidx == nr_frags - 1)
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
tx_ring->txbufs[idx].dma_addr = 0;
tx_ring->txbufs[idx].skb = NULL;
@@ -1709,8 +1710,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
}
- if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
- dp->bpf_offload_xdp) && !meta.portid) {
+ if (xdp_prog && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
int act;
@@ -1828,7 +1828,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
unsigned int pkts_polled = 0;
if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, budget);
if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
@@ -2062,7 +2062,7 @@ static void nfp_ctrl_poll(unsigned long arg)
struct nfp_net_r_vector *r_vec = (void *)arg;
spin_lock_bh(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring);
+ nfp_net_tx_complete(r_vec->tx_ring, 0);
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
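Threading the NAPI budget into nfp_net_tx_complete() allows completed skbs to be freed with napi_consume_skb(), which can batch frees when running in softirq context. A budget of 0, as passed by the control-path tasklet above, tells napi_consume_skb() that the caller is not in NAPI context and makes it fall back to dev_consume_skb_any(). The convention in isolation, as an illustrative helper:

#include <linux/skbuff.h>

static void sketch_free_tx_skb(struct sk_buff *skb, int budget)
{
        /* budget != 0: NAPI poll context, the bulk free path may be used.
         * budget == 0: IRQ/tasklet/netpoll context, the safe fallback is used.
         */
        napi_consume_skb(skb, budget);
}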
@@ -3115,6 +3115,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nfp_net_netpoll(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int i;
+
+ /* nfp_net's NAPIs are statically allocated so even if there is a race
+ * with reconfig path this will simply try to schedule some disabled
+ * NAPI instances.
+ */
+ for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
+ napi_schedule_irqoff(&nn->r_vecs[i].napi);
+}
+#endif
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3377,14 +3392,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
-static int
-nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
{
+ struct bpf_prog *prog = bpf->prog;
struct nfp_net_dp *dp;
+ int err;
+
+ if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
+ return -EBUSY;
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
+ xdp_attachment_setup(&nn->xdp, bpf);
return 0;
}
@@ -3398,38 +3417,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
- return nfp_net_ring_reconfig(nn, dp, extack);
+ err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
+ if (err)
+ return err;
+
+ xdp_attachment_setup(&nn->xdp, bpf);
+ return 0;
}
-static int
-nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
- struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
- struct bpf_prog *drv_prog, *offload_prog;
int err;
- if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+ if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
return -EBUSY;
- /* Load both when no flags set to allow easy activation of driver path
- * when program is replaced by one which can't be offloaded.
- */
- drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
- offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
-
- err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
+ err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
- if (err && flags & XDP_FLAGS_HW_MODE)
- return err;
-
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
- nn->xdp_prog = prog;
- nn->xdp_flags = flags;
-
+ xdp_attachment_setup(&nn->xdp_hw, bpf);
return 0;
}
@@ -3439,16 +3446,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
+ return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
- return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
- xdp->extack);
+ return nfp_net_xdp_setup_hw(nn, xdp);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!nn->xdp_prog;
- if (nn->dp.bpf_offload_xdp)
- xdp->prog_attached = XDP_ATTACHED_HW;
- xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
- xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
- return 0;
+ return xdp_attachment_query(&nn->xdp, xdp);
+ case XDP_QUERY_PROG_HW:
+ return xdp_attachment_query(&nn->xdp_hw, xdp);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
@@ -3482,6 +3486,9 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nfp_net_netpoll,
+#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 26d1cc4e2906..6a79c8e4a7a4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
- struct nfp_app *app;
-
- app = nfp_app_from_netdev(netdev);
- if (!app)
- return;
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+ return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
struct nfp_net *nn = netdev_priv(netdev);
int i;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
- for (i = 0; i < nn->dp.num_r_vecs; i++) {
+ for (i = 0; i < nn->max_r_vecs; i++) {
unsigned int start;
do {
@@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
return data;
}
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
- return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+ return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}
static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
- unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
int swap_off, i;
@@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name);
- for (i = 0; i < tx_rings; i++) {
- data = nfp_pr_et(data, "txq_%u_pkts", i);
- data = nfp_pr_et(data, "txq_%u_bytes", i);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
data = nfp_pr_et(data, "rxq_%u_pkts", i);
data = nfp_pr_et(data, "rxq_%u_bytes", i);
+ data = nfp_pr_et(data, "txq_%u_pkts", i);
+ data = nfp_pr_et(data, "txq_%u_bytes", i);
}
return data;
}
static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
- unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
unsigned int i;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
*data++ = readq(mem + nfp_net_et_stats[i].off);
- for (i = 0; i < tx_rings; i++) {
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
- *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
- }
-
- for (i = 0; i < rx_rings; i++) {
+ for (i = 0; i < num_vecs; i++) {
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+ *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
}
return data;
@@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
data = nfp_vnic_get_sw_stats_strings(netdev, data);
- data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
- nn->dp.num_tx_rings,
+ data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
false);
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_net *nn = netdev_priv(netdev);
data = nfp_vnic_get_sw_stats(netdev, data);
- data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
- nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+ data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(nn->port, data);
}
@@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
return nfp_vnic_get_sw_stats_count(netdev) +
- nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
- nn->dp.num_tx_rings) +
+ nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
nfp_mac_get_stats_count(netdev) +
nfp_app_port_get_stats_count(nn->port);
default:
@@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+ data = nfp_vnic_get_hw_stats_strings(data, 0, true);
else
data = nfp_mac_get_stats_strings(netdev, data);
data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
struct nfp_port *port = nfp_port_from_netdev(netdev);
if (nfp_port_is_vnic(port))
- data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+ data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
else
data = nfp_mac_get_stats(netdev, data);
data = nfp_app_port_get_stats(port, data);
@@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
if (nfp_port_is_vnic(port))
- count = nfp_vnic_get_hw_stats_count(0, 0);
+ count = nfp_vnic_get_hw_stats_count(0);
else
count = nfp_mac_get_stats_count(netdev);
count += nfp_app_port_get_stats_count(port);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 749655c329b2..c8d0b1016a64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp)
kfree(nfp);
}
-static void nfp6000_read_serial(struct device *dev, u8 *serial)
+static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
@@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
if (!pos) {
- memset(serial, 0, NFP_SERIAL_LEN);
- return;
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
}
pci_read_config_dword(pdev, pos + 4, &reg);
put_unaligned_be16(reg >> 16, serial + 4);
pci_read_config_dword(pdev, pos + 8, &reg);
put_unaligned_be32(reg, serial);
+
+ return 0;
}
-static u16 nfp6000_get_interface(struct device *dev)
+static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int pos;
u32 reg;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos)
- return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+ if (!pos) {
+ dev_err(dev, "can't find PCIe Serial Number Capability\n");
+ return -EINVAL;
+ }
pci_read_config_dword(pdev, pos + 4, &reg);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index b0da3d436850..c338d539fa96 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -364,8 +364,8 @@ struct nfp_cpp_operations {
int (*init)(struct nfp_cpp *cpp);
void (*free)(struct nfp_cpp *cpp);
- void (*read_serial)(struct device *dev, u8 *serial);
- u16 (*get_interface)(struct device *dev);
+ int (*read_serial)(struct device *dev, u8 *serial);
+ int (*get_interface)(struct device *dev);
int (*area_init)(struct nfp_cpp_area *area,
u32 dest, unsigned long long address,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index ef30597aa319..73de57a09800 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
{
const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
struct nfp_cpp *cpp;
+ int ifc, err;
u32 mask[2];
u32 xpbaddr;
size_t tgt;
- int err;
cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
if (!cpp) {
@@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
cpp->op = ops;
cpp->priv = priv;
- cpp->interface = ops->get_interface(parent);
- if (ops->read_serial)
- ops->read_serial(parent, cpp->serial);
+
+ ifc = ops->get_interface(parent);
+ if (ifc < 0) {
+ err = ifc;
+ goto err_free_cpp;
+ }
+ cpp->interface = ifc;
+ if (ops->read_serial) {
+ err = ops->read_serial(parent, cpp->serial);
+ if (err)
+ goto err_free_cpp;
+ }
+
rwlock_init(&cpp->resource_lock);
init_waitqueue_head(&cpp->waitq);
lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
@@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err = device_register(&cpp->dev);
if (err < 0) {
put_device(&cpp->dev);
- goto err_dev;
+ goto err_free_cpp;
}
dev_set_drvdata(&cpp->dev, cpp);
@@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
err_out:
device_unregister(&cpp->dev);
-err_dev:
+err_free_cpp:
kfree(cpp);
err_malloc:
return ERR_PTR(err);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad248..862de0f3bc41 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_PCH_GBE) += pch_gbe.o
pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
-pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
+pch_gbe-y += pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 697e29dd4bd3..44c2f291e766 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -326,32 +326,6 @@ struct pch_gbe_regs {
#define PCH_GBE_FC_FULL 3
#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
-
-struct pch_gbe_hw;
-/**
- * struct pch_gbe_functions - HAL APi function pointer
- * @get_bus_info: for pch_gbe_hal_get_bus_info
- * @init_hw: for pch_gbe_hal_init_hw
- * @read_phy_reg: for pch_gbe_hal_read_phy_reg
- * @write_phy_reg: for pch_gbe_hal_write_phy_reg
- * @reset_phy: for pch_gbe_hal_phy_hw_reset
- * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
- * @power_up_phy: for pch_gbe_hal_power_up_phy
- * @power_down_phy: for pch_gbe_hal_power_down_phy
- * @read_mac_addr: for pch_gbe_hal_read_mac_addr
- */
-struct pch_gbe_functions {
- void (*get_bus_info) (struct pch_gbe_hw *);
- s32 (*init_hw) (struct pch_gbe_hw *);
- s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
- s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
- void (*reset_phy) (struct pch_gbe_hw *);
- void (*sw_reset_phy) (struct pch_gbe_hw *);
- void (*power_up_phy) (struct pch_gbe_hw *hw);
- void (*power_down_phy) (struct pch_gbe_hw *hw);
- s32 (*read_mac_addr) (struct pch_gbe_hw *);
-};
-
/**
* struct pch_gbe_mac_info - MAC information
* @addr[6]: Store the MAC address
@@ -394,17 +368,6 @@ struct pch_gbe_phy_info {
/*!
* @ingroup Gigabit Ether driver Layer
- * @struct pch_gbe_bus_info
- * @brief Bus information
- */
-struct pch_gbe_bus_info {
- u8 type;
- u8 speed;
- u8 width;
-};
-
-/*!
- * @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_hw
* @brief Hardware information
*/
@@ -414,10 +377,8 @@ struct pch_gbe_hw {
struct pch_gbe_regs __iomem *reg;
spinlock_t miim_lock;
- const struct pch_gbe_functions *func;
struct pch_gbe_mac_info mac;
struct pch_gbe_phy_info phy;
- struct pch_gbe_bus_info bus;
};
/**
@@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev);
/* pch_gbe_mac.c */
s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
deleted file mode 100644
index 51250363566b..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "pch_gbe.h"
-#include "pch_gbe_phy.h"
-#include "pch_gbe_api.h"
-
-/* bus type values */
-#define pch_gbe_bus_type_unknown 0
-#define pch_gbe_bus_type_pci 1
-#define pch_gbe_bus_type_pcix 2
-#define pch_gbe_bus_type_pci_express 3
-#define pch_gbe_bus_type_reserved 4
-
-/* bus speed values */
-#define pch_gbe_bus_speed_unknown 0
-#define pch_gbe_bus_speed_33 1
-#define pch_gbe_bus_speed_66 2
-#define pch_gbe_bus_speed_100 3
-#define pch_gbe_bus_speed_120 4
-#define pch_gbe_bus_speed_133 5
-#define pch_gbe_bus_speed_2500 6
-#define pch_gbe_bus_speed_reserved 7
-
-/* bus width values */
-#define pch_gbe_bus_width_unknown 0
-#define pch_gbe_bus_width_pcie_x1 1
-#define pch_gbe_bus_width_pcie_x2 2
-#define pch_gbe_bus_width_pcie_x4 4
-#define pch_gbe_bus_width_32 5
-#define pch_gbe_bus_width_64 6
-#define pch_gbe_bus_width_reserved 7
-
-/**
- * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
-{
- hw->bus.type = pch_gbe_bus_type_pci_express;
- hw->bus.speed = pch_gbe_bus_speed_2500;
- hw->bus.width = pch_gbe_bus_width_pcie_x1;
-}
-
-/**
- * pch_gbe_plat_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * Negative value: Failed-EBUSY
- */
-static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
-{
- s32 ret_val;
-
- ret_val = pch_gbe_phy_get_id(hw);
- if (ret_val) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
- return ret_val;
- }
- pch_gbe_phy_init_setting(hw);
- /* Setup Mac interface option RGMII */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
- pch_gbe_phy_set_rgmii(hw);
-#endif
- return ret_val;
-}
-
-static const struct pch_gbe_functions pch_gbe_ops = {
- .get_bus_info = pch_gbe_plat_get_bus_info,
- .init_hw = pch_gbe_plat_init_hw,
- .read_phy_reg = pch_gbe_phy_read_reg_miic,
- .write_phy_reg = pch_gbe_phy_write_reg_miic,
- .reset_phy = pch_gbe_phy_hw_reset,
- .sw_reset_phy = pch_gbe_phy_sw_reset,
- .power_up_phy = pch_gbe_phy_power_up,
- .power_down_phy = pch_gbe_phy_power_down,
- .read_mac_addr = pch_gbe_mac_read_mac_addr
-};
-
-/**
- * pch_gbe_plat_init_function_pointers - Init func ptrs
- * @hw: Pointer to the HW structure
- */
-static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
-{
- /* Set PHY parameter */
- hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Set function pointers */
- hw->func = &pch_gbe_ops;
-}
-
-/**
- * pch_gbe_hal_setup_init_funcs - Initializes function pointers
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
-{
- if (!hw->reg) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
- return -ENOSYS;
- }
- pch_gbe_plat_init_function_pointers(hw);
- return 0;
-}
-
-/**
- * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
-{
- if (!hw->func->get_bus_info) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->get_bus_info(hw);
-}
-
-/**
- * pch_gbe_hal_init_hw - Initialize hardware
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
-{
- if (!hw->func->init_hw) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->init_hw(hw);
-}
-
-/**
- * pch_gbe_hal_read_phy_reg - Reads PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The buffer to store the 16-bit read.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 *data)
-{
- if (!hw->func->read_phy_reg)
- return 0;
- return hw->func->read_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_write_phy_reg - Writes PHY register
- * @hw: Pointer to the HW structure
- * @offset: The register to read
- * @data: The value to write.
- * Returns:
- * 0: Successfully
- * Negative value: Failed
- */
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
- u16 data)
-{
- if (!hw->func->write_phy_reg)
- return 0;
- return hw->func->write_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_phy_hw_reset - Hard PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_phy_sw_reset - Soft PHY reset
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
-{
- if (!hw->func->sw_reset_phy) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return;
- }
- hw->func->sw_reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_read_mac_addr - Reads MAC address
- * @hw: Pointer to the HW structure
- * Returns:
- * 0: Successfully
- * ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
-{
- if (!hw->func->read_mac_addr) {
- struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
- netdev_err(adapter->netdev, "ERROR: configuration\n");
- return -ENOSYS;
- }
- return hw->func->read_mac_addr(hw);
-}
-
-/**
- * pch_gbe_hal_power_up_phy - Power up PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_up_phy)
- hw->func->power_up_phy(hw);
-}
-
-/**
- * pch_gbe_hal_power_down_phy - Power down PHY
- * @hw: Pointer to the HW structure
- */
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
-{
- if (hw->func->power_down_phy)
- hw->func->power_down_phy(hw);
-}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
deleted file mode 100644
index 91ce07c8306c..000000000000
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _PCH_GBE_API_H_
-#define _PCH_GBE_API_H_
-
-#include "pch_gbe_phy.h"
-
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
-
-#endif
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 731ce1e419e4..adaa0024adfe 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -17,7 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
/**
* pch_gbe_stats - Stats item information
@@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev,
u32 advertising;
int ret;
- pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
@@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev,
*regs_buff++ = ioread32(&hw->reg->INT_ST + i);
/* PHY register */
for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
- pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp);
*regs_buff++ = tmp;
}
}
@@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev,
err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
if (err)
goto err_setup_tx;
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-#ifdef RINGFREE
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
- pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
- kfree(tx_old);
- kfree(rx_old);
- adapter->rx_ring = rxdr;
- adapter->tx_ring = txdr;
-#else
pch_gbe_free_rx_resources(adapter, rx_old);
pch_gbe_free_tx_resources(adapter, tx_old);
kfree(tx_old);
kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
-#endif
err = pch_gbe_up(adapter);
}
return err;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 34a1581eda95..43c0c10dfeb7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -18,7 +18,7 @@
*/
#include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
@@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
-#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
@@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION;
#define MINNOW_PHY_RESET_GPIO 13
-static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
-
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
@@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
* Returns:
* 0: Successful.
*/
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
{
struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
u32 adr1a, adr1b;
@@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
/* Read the MAC address. and store to the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
-#ifdef PCH_GBE_MAC_IFOP_RGMII
iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
-#endif
pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
/* Setup the receive addresses */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
@@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
}
-
-/**
- * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
- * @hw: Pointer to the HW structure
- * @mc_addr_list: Array of multicast addresses to program
- * @mc_addr_count: Number of multicast addresses to program
- * @mar_used_count: The first MAC Address register free to program
- * @mar_total_num: Total number of supported MAC Address Registers
- */
-static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 mar_used_count, u32 mar_total_num)
-{
- u32 i, adrmask;
-
- /* Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = mar_used_count; i < mar_total_num; i++) {
- if (mc_addr_count) {
- pch_gbe_mac_mar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- /* Clear MAC address mask */
- adrmask = ioread32(&hw->reg->ADDR_MASK);
- iowrite32((adrmask | (0x0001 << i)),
- &hw->reg->ADDR_MASK);
- /* wait busy */
- pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
- /* Clear MAC address */
- iowrite32(0, &hw->reg->mac_adr[i].high);
- iowrite32(0, &hw->reg->mac_adr[i].low);
- }
- }
-}
-
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
@@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
void pch_gbe_reset(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ s32 ret_val;
- pch_gbe_mac_reset_hw(&adapter->hw);
+ pch_gbe_mac_reset_hw(hw);
/* reprogram multicast address register after reset */
pch_gbe_set_multi(netdev);
/* Setup the receive address. */
- pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
- if (pch_gbe_hal_init_hw(&adapter->hw))
- netdev_err(netdev, "Hardware Error\n");
+ pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
+ return;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup Mac interface option RGMII */
+ pch_gbe_phy_set_rgmii(hw);
}
/**
@@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
unsigned long rgmii = 0;
/* Set the RGMII control. */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
switch (speed) {
case SPEED_10:
rgmii = (PCH_GBE_RGMII_RATE_2_5M |
@@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
break;
}
iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#else /* GMII */
- rgmii = 0;
- iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#endif
}
static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
u16 duplex)
@@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
- /* Initialize the hardware-specific values */
- if (pch_gbe_hal_setup_init_funcs(hw)) {
- netdev_err(netdev, "Hardware Initialization Failure\n");
- return -EIO;
- }
if (pch_gbe_alloc_queues(adapter)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev)
err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
if (err)
goto err_setup_rx;
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
err = pch_gbe_up(adapter);
if (err)
goto err_up;
@@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev)
err_up:
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
@@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
pch_gbe_down(adapter);
if (!adapter->wake_up_evt)
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
return 0;
@@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
- u8 *mta_list;
- u32 rctl;
- int i;
- int mc_count;
+ u32 rctl, adrmask;
+ int mc_count, i;
netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
- /* Check for Promiscuous and All Multicast modes */
+ /* By default enable address & multicast filtering */
rctl = ioread32(&hw->reg->RX_MODE);
+ rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
+
+ /* Promiscuous mode disables all hardware address filtering */
+ if (netdev->flags & IFF_PROMISC)
+ rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+
+ /* If we want to monitor more multicast addresses than the hardware can
+ * support then disable hardware multicast filtering.
+ */
mc_count = netdev_mc_count(netdev);
- if ((netdev->flags & IFF_PROMISC)) {
- rctl &= ~PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else if ((netdev->flags & IFF_ALLMULTI)) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
+ if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- if (mc_count >= PCH_GBE_MAR_ENTRIES) {
- /* all the multicasting receive permissions */
- rctl |= PCH_GBE_ADD_FIL_EN;
- rctl &= ~PCH_GBE_MLT_FIL_EN;
- } else {
- rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
- }
- }
+
iowrite32(rctl, &hw->reg->RX_MODE);
- if (mc_count >= PCH_GBE_MAR_ENTRIES)
- return;
- mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
- if (!mta_list)
+ /* If we're not using multicast filtering then there's no point
+ * configuring the unused MAC address registers.
+ */
+ if (!(rctl & PCH_GBE_MLT_FIL_EN))
return;
- /* The shared function expects a packed array of only addresses. */
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ /* Load the first set of multicast addresses into MAC address registers
+ * for use by hardware filtering.
+ */
+ i = 1;
+ netdev_for_each_mc_addr(ha, netdev)
+ pch_gbe_mac_mar_set(hw, ha->addr, i++);
+
+ /* If there are spare MAC registers, mask & clear them */
+ for (; i < PCH_GBE_MAR_ENTRIES; i++) {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
}
- pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
- PCH_GBE_MAR_ENTRIES);
- kfree(mta_list);
netdev_dbg(netdev,
"RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
@@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake up status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
} else {
- pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_phy_power_down(hw);
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
@@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device)
return err;
}
pci_set_master(pdev);
- pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_phy_power_up(hw);
pch_gbe_reset(adapter);
/* Clear wake on lan control and status */
pch_gbe_mac_set_wol_event(hw, 0);
@@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
free_netdev(netdev);
}
@@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "PHY initialize error\n");
goto err_free_adapter;
}
- pch_gbe_hal_get_bus_info(&adapter->hw);
/* Read the MAC address. and store to the private data */
- ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
if (ret) {
dev_err(&pdev->dev, "MAC address Read Error\n");
goto err_free_adapter;
@@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
return 0;
err_free_adapter:
- pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ pch_gbe_phy_hw_reset(&adapter->hw);
err_free_netdev:
free_netdev(netdev);
return ret;
@@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = {
.shutdown = pch_gbe_shutdown,
.err_handler = &pch_gbe_err_handler
};
-
-
-static int __init pch_gbe_init_module(void)
-{
- int ret;
-
- pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
- ret = pci_register_driver(&pch_gbe_driver);
- if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
- if (copybreak == 0) {
- pr_info("copybreak disabled\n");
- } else {
- pr_info("copybreak enabled for packets <= %u bytes\n",
- copybreak);
- }
- }
- return ret;
-}
-
-static void __exit pch_gbe_exit_module(void)
-{
- pci_unregister_driver(&pch_gbe_driver);
-}
-
-module_init(pch_gbe_init_module);
-module_exit(pch_gbe_exit_module);
+module_pci_driver(pch_gbe_driver);
MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
@@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
/* pch_gbe_main.c */
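With the copybreak module parameter gone, the hand-rolled init/exit pair reduces to module_pci_driver(). The macro comes from <linux/pci.h> and, for this driver, expands to approximately:

static int __init pch_gbe_driver_init(void)
{
        return pci_register_driver(&pch_gbe_driver);
}
module_init(pch_gbe_driver_init);

static void __exit pch_gbe_driver_exit(void)
{
        pci_unregister_driver(&pch_gbe_driver);
}
module_exit(pch_gbe_driver_exit);

The version banner and copybreak reporting previously printed at load time are dropped along the way.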
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index a5cad5ea9436..6b35b573beef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
* pch_gbe_phy_sw_reset - PHY software reset
* @hw: Pointer to the HW structure
*/
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
{
u16 phy_ctrl;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 95ad0151ad02..23ac38711619 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -21,12 +21,10 @@
#define PCH_GBE_PHY_REGS_LEN 32
#define PCH_GBE_PHY_RESET_DELAY_US 10
-#define PCH_GBE_MAC_IFOP_RGMII
s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b5ea2a56106e..1df28f2edd1f 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -2,7 +2,7 @@
# Packet engine device configuration
#
-config NET_PACKET_ENGINE
+config NET_VENDOR_PACKET_ENGINES
bool "Packet Engine devices"
default y
depends on PCI
@@ -14,7 +14,7 @@ config NET_PACKET_ENGINE
the questions about packet engine devices. If you say Y, you will
be asked for your specific card in the following questions.
-if NET_PACKET_ENGINE
+if NET_VENDOR_PACKET_ENGINES
config HAMACHI
tristate "Packet Engines Hamachi GNIC-II support"
@@ -40,4 +40,4 @@ config YELLOWFIN
To compile this driver as a module, choose M here: the module
will be called yellowfin. This is recommended.
-endif # NET_PACKET_ENGINE
+endif # NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 1cd39c9a0345..52ad80621335 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -566,9 +566,8 @@ static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
- u32 i, producer, consumer;
+ u32 i, producer;
struct netxen_cmd_buffer *pbuf;
- struct cmd_desc_type0 *cmd_desc;
struct nx_host_tx_ring *tx_ring;
i = 0;
@@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
- consumer = tx_ring->sw_consumer;
if (nr_desc >= netxen_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
@@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
}
do {
- cmd_desc = &cmd_desc_arr[i];
-
pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = NULL;
pbuf->frag_count = 0;
@@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
static int netxen_parse_md_template(struct netxen_adapter *adapter)
{
int num_of_entries, buff_level, e_cnt, esize;
- int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+ int rv = 0, sane_start = 0, sane_end = 0;
char *dbuff;
void *template_buff = adapter->mdump.md_template;
char *dump_buff = adapter->mdump.md_capture_buff;
@@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter)
break;
case RDEND:
entry->hdr.driver_flags |= NX_DUMP_SKIP;
- if (!sane_end)
- end_cnt = e_cnt;
sane_end += 1;
break;
case CNTRL:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8259e8309320..69aa7fc392c5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index e0680ce91328..12b4c2ab5796 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
- char *name;
int i;
for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
@@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
continue;
personality = qed_dcbx_app_update[i].personality;
- name = qed_dcbx_app_update[i].name;
qed_dcbx_set_params(p_data, p_info, enable,
prio, tc, type, personality);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index bee10c1781fb..8faceb691657 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12444,6 +12444,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
@@ -12543,6 +12545,15 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
/* Resource Allocation params - Driver version support */
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
@@ -12596,6 +12607,9 @@ struct public_drv_mb {
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
@@ -12687,6 +12701,8 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
+#define MAX_I2C_TRANSACTION_SIZE 16
+
/* OCBB definitions */
enum tlvs {
/* Category 1: Device Properties */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 758a9a5127fa..dbe81310c0b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -2102,6 +2102,28 @@ out:
return status;
}
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
+ u8 dev_addr, u32 offset, u32 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
+ offset, len, buf);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -2144,6 +2166,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .read_module_eeprom = &qed_read_module_eeprom,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9d9e533bccdc..15f1b3294289 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2466,6 +2466,55 @@ out:
return rc;
}
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
+ u32 resp, param;
+ int rc;
+
+ nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
+ nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
+
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return -ENODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return -EINVAL;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return 0;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
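qed_mcp_phy_sfp_read() packs the port, I2C device address, per-transaction size and byte offset into the single nvm_offset mailbox parameter using the DRV_MB_PARAM_TRANSCEIVER_* fields added to qed_hsi.h, reading at most MAX_I2C_TRANSACTION_SIZE (16) bytes per iteration. A worked example, assuming port 0 and the 0xA0 device address:

/*
 *   chunk 0 (offset  0): (0 << 0) | (16 << 2) | (0xA0 << 8) | ( 0 << 16) = 0x0000a040
 *   chunk 1 (offset 16): (0 << 0) | (16 << 2) | (0xA0 << 8) | (16 << 16) = 0x0010a040
 *
 * i.e. PORT in bits [1:0], SIZE in [7:2], I2C_ADDRESS in [15:8], OFFSET in [31:16].
 */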
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 632a838f1fe3..047976d5c6e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index f4a0f8ff8261..b37857f3f950 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1780,6 +1780,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
+static int qede_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 buf[4];
+ int rc;
+
+ /* Read first 4 bytes to find the sfp type */
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0, 0, 4);
+ if (rc) {
+ DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
+ return rc;
+ }
+
+ switch (buf[0]) {
+ case 0x3: /* SFP, SFP+, SFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case 0xc: /* QSFP */
+ case 0xd: /* QSFP+ */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case 0x11: /* QSFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qede_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 start_addr = ee->offset, size = 0;
+ u8 *buf = data;
+ int rc = 0;
+
+ /* Read A0 section */
+ if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
+ size = ETH_MODULE_SFF_8079_LEN - ee->offset;
+ else
+ size = ee->len;
+
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0,
+ start_addr, size);
+ if (rc) {
+ DP_ERR(edev, "Failed reading A0 section %d\n", rc);
+ return rc;
+ }
+
+ buf += size;
+ start_addr += size;
+ }
+
+ /* Read A2 section */
+ if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
+ start_addr < ETH_MODULE_SFF_8472_LEN) {
+ size = ee->len - size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
+ size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A2,
+ start_addr, size);
+ if (rc) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Failed reading A2 section %d\n", rc);
+ return 0;
+ }
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -1813,6 +1899,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
.get_eee = qede_get_eee,
.set_eee = qede_set_eee,
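The two new callbacks implement the standard ethtool module-EEPROM interface, exercised from userspace with "ethtool -m <dev>": qede_get_module_info() derives the SFF map and total length from the SFF-8024 identifier byte, and qede_get_module_eeprom() splits the linear offset range across the two I2C device addresses. For the SFF-8472 case the split coded above is:

/* Linear ethtool offset -> I2C device address, SFF-8472 modules:
 *
 *   0 .. ETH_MODULE_SFF_8079_LEN - 1 (0..255)                  -> QED_I2C_DEV_ADDR_A0, identification page
 *   ETH_MODULE_SFF_8079_LEN .. ETH_MODULE_SFF_8472_LEN - 1 (256..511) -> QED_I2C_DEV_ADDR_A2, diagnostics page
 *
 * QED_I2C_DEV_ADDR_A0/_A2 are assumed to carry the 0xA0/0xA2 bus addresses.
 */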
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b823bfe2ea4d..f9a327c821eb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1116,7 +1116,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
case XDP_QUERY_PROG:
- xdp->prog_attached = !!edev->xdp_prog;
xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
return 0;
default:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0c744b9c6e0a..77e386ebff09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vp->max_tx_bw = MAX_BW;
vp->min_tx_bw = MIN_BW;
vp->spoofchk = false;
- random_ether_addr(vp->mac);
+ eth_random_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
vp->mac, i);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index b9a7548ec6a0..0afc3d335d56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- random_ether_addr(rmnet_dev->dev_addr);
+ eth_random_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 7c69f4c8134d..e1cd934c2e4f 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -99,7 +99,7 @@ config R8169
depends on PCI
select FW_LOADER
select CRC32
- select MII
+ select PHYLIB
---help---
Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a3f69901ac87..49a6e25ddc2b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -15,7 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
-#include <linux/pci-aspm.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -35,7 +34,6 @@
#define RTL8169_VERSION "2.3LK-NAPI"
#define MODULENAME "r8169"
-#define PFX MODULENAME ": "
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
@@ -57,19 +55,6 @@
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
-#ifdef RTL8169_DEBUG
-#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
-#define dprintk(fmt, args...) \
- do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
-#else
-#define assert(expr) do {} while (0)
-#define dprintk(fmt, args...) do {} while (0)
-#endif /* RTL8169_DEBUG */
-
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
@@ -95,7 +80,6 @@ static const int multicast_filter_limit = 32;
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL8169_TX_TIMEOUT (6*HZ)
-#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
@@ -399,12 +383,6 @@ enum rtl_registers {
FuncForceEvent = 0xfc,
};
-enum rtl8110_registers {
- TBICSR = 0x64,
- TBI_ANAR = 0x68,
- TBI_LPAR = 0x6a,
-};
-
enum rtl8168_8101_registers {
CSIDR = 0x64,
CSIAR = 0x68,
@@ -571,14 +549,6 @@ enum rtl_register_content {
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
ASPM_en = (1 << 0), /* ASPM enable */
- /* TBICSR p.28 */
- TBIReset = 0x80000000,
- TBILoopback = 0x40000000,
- TBINwEnable = 0x20000000,
- TBINwRestart = 0x10000000,
- TBILinkOk = 0x02000000,
- TBINwComplete = 0x01000000,
-
/* CPlusCmd p.31 */
EnableBist = (1 << 15), // 8168 8101
Mac_dbgo_oe = (1 << 14), // 8168 8101
@@ -732,7 +702,6 @@ enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_PHY_PENDING,
RTL_FLAG_MAX
};
@@ -760,7 +729,6 @@ struct rtl8169_private {
dma_addr_t RxPhyAddr;
void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
- struct timer_list timer;
u16 cp_cmd;
u16 event_slow;
@@ -776,14 +744,7 @@ struct rtl8169_private {
void (*disable)(struct rtl8169_private *);
} jumbo_ops;
- int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
- int (*get_link_ksettings)(struct net_device *,
- struct ethtool_link_ksettings *);
- void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct rtl8169_private *tp);
- unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
- unsigned int (*link_ok)(struct rtl8169_private *tp);
- int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
struct {
@@ -792,7 +753,8 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- struct mii_if_info mii;
+ unsigned supports_gmii:1;
+ struct mii_bus *mii_bus;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -1143,21 +1105,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
rtl_writephy(tp, reg_addr, (val & ~m) | p);
}
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl_writephy(tp, location, val);
-}
-
-static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return rtl_readphy(tp, location);
-}
-
DECLARE_RTL_COND(rtl_ephyar_cond)
{
return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1478,54 +1425,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
RTL_R8(tp, ChipCmd);
}
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBIReset;
-}
-
-static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
-{
- return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
-}
-
-static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R32(tp, TBICSR) & TBILinkOk;
-}
-
-static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
-{
- return RTL_R8(tp, PHYstatus) & LinkStatus;
-}
-
-static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
-{
- RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
-}
-
-static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
-{
- unsigned int val;
-
- val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
- rtl_writephy(tp, MII_BMCR, val & 0xffff);
-}
-
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
ERIAR_EXGMAC);
- } else if (RTL_R8(tp, PHYstatus) & _100bps) {
+ } else if (phydev->speed == SPEED_100) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1543,7 +1458,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
- if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+ if (phydev->speed == SPEED_1000) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1555,7 +1470,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
- if (RTL_R8(tp, PHYstatus) & _10bps) {
+ if (phydev->speed == SPEED_10) {
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1567,25 +1482,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
-
- if (tp->link_ok(tp)) {
- rtl_link_chg_patch(tp);
- /* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(d);
- netif_carrier_on(dev);
- if (net_ratelimit())
- netif_info(tp, ifup, dev, "link up\n");
- } else {
- netif_carrier_off(dev);
- netif_info(tp, ifdown, dev, "link down\n");
- pm_runtime_idle(d);
- }
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -1626,21 +1522,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = tp_to_dev(tp);
-
- pm_runtime_get_noresume(d);
rtl_lock_work(tp);
-
wol->supported = WAKE_ANY;
- if (pm_runtime_active(d))
- wol->wolopts = __rtl8169_get_wol(tp);
- else
- wol->wolopts = tp->saved_wolopts;
-
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
-
- pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1716,18 +1602,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
pm_runtime_get_noresume(d);
rtl_lock_work(tp);
+ tp->saved_wolopts = wol->wolopts;
+
if (pm_runtime_active(d))
- __rtl8169_set_wol(tp, wol->wolopts);
- else
- tp->saved_wolopts = wol->wolopts;
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
rtl_unlock_work(tp);
- device_set_wakeup_enable(d, wol->wolopts);
+ device_set_wakeup_enable(d, tp->saved_wolopts);
pm_runtime_put_noidle(d);
@@ -1759,124 +1648,6 @@ static int rtl8169_get_regs_len(struct net_device *dev)
return R8169_REGS_SIZE;
}
-static int rtl8169_set_speed_tbi(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 ignored)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret = 0;
- u32 reg;
-
- reg = RTL_R32(tp, TBICSR);
- if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
- (duplex == DUPLEX_FULL)) {
- RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
- } else if (autoneg == AUTONEG_ENABLE)
- RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
- else {
- netif_warn(tp, link, dev,
- "incorrect speed setting refused in TBI mode\n");
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int rtl8169_set_speed_xmii(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 adv)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int giga_ctrl, bmcr;
- int rc = -EINVAL;
-
- rtl_writephy(tp, 0x1f, 0x0000);
-
- if (autoneg == AUTONEG_ENABLE) {
- int auto_nego;
-
- auto_nego = rtl_readphy(tp, MII_ADVERTISE);
- auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
-
- if (adv & ADVERTISED_10baseT_Half)
- auto_nego |= ADVERTISE_10HALF;
- if (adv & ADVERTISED_10baseT_Full)
- auto_nego |= ADVERTISE_10FULL;
- if (adv & ADVERTISED_100baseT_Half)
- auto_nego |= ADVERTISE_100HALF;
- if (adv & ADVERTISED_100baseT_Full)
- auto_nego |= ADVERTISE_100FULL;
-
- auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
- giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
- giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
- /* The 8100e/8101e/8102e do Fast Ethernet only. */
- if (tp->mii.supports_gmii) {
- if (adv & ADVERTISED_1000baseT_Half)
- giga_ctrl |= ADVERTISE_1000HALF;
- if (adv & ADVERTISED_1000baseT_Full)
- giga_ctrl |= ADVERTISE_1000FULL;
- } else if (adv & (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full)) {
- netif_info(tp, link, dev,
- "PHY does not support 1000Mbps\n");
- goto out;
- }
-
- bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
-
- rtl_writephy(tp, MII_ADVERTISE, auto_nego);
- rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
- } else {
- if (speed == SPEED_10)
- bmcr = 0;
- else if (speed == SPEED_100)
- bmcr = BMCR_SPEED100;
- else
- goto out;
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
- }
-
- rtl_writephy(tp, MII_BMCR, bmcr);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
- tp->mac_version == RTL_GIGA_MAC_VER_03) {
- if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
- rtl_writephy(tp, 0x17, 0x2138);
- rtl_writephy(tp, 0x0e, 0x0260);
- } else {
- rtl_writephy(tp, 0x17, 0x2108);
- rtl_writephy(tp, 0x0e, 0x0000);
- }
- }
-
- rc = 0;
-out:
- return rc;
-}
-
-static int rtl8169_set_speed(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex, u32 advertising)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
- if (ret < 0)
- goto out;
-
- if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
- (advertising & ADVERTISED_1000baseT_Full) &&
- !pci_is_pcie(tp->pci_dev)) {
- mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
- }
-out:
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1940,76 +1711,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- u32 status;
- u32 supported, advertising;
-
- supported =
- SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
- cmd->base.port = PORT_FIBRE;
-
- status = RTL_R32(tp, TBICSR);
- advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
- cmd->base.autoneg = !!(status & TBINwEnable);
-
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL; /* Always set */
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- mii_ethtool_get_link_ksettings(&tp->mii, cmd);
-
- return 0;
-}
-
-static int rtl8169_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
-
- rtl_lock_work(tp);
- rc = tp->get_link_ksettings(dev, cmd);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
-static int rtl8169_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int rc;
- u32 advertising;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising))
- return -EINVAL;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
- cmd->base.duplex, advertising);
- rtl_unlock_work(tp);
-
- return rc;
-}
-
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2185,13 +1886,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int rtl8169_nway_reset(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- return mii_nway_restart(&tp->mii);
-}
-
/*
* Interrupt coalescing
*
@@ -2264,7 +1958,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
const struct rtl_coalesce_info *ci;
int rc;
- rc = rtl8169_get_link_ksettings(dev, &ecmd);
+ rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
if (rc < 0)
return ERR_PTR(rc);
@@ -2422,9 +2116,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
.get_ts_info = ethtool_op_get_ts_info,
- .nway_reset = rtl8169_nway_reset,
- .get_link_ksettings = rtl8169_get_link_ksettings,
- .set_link_ksettings = rtl8169_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -2537,15 +2231,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
"unknown MAC, using family default\n");
tp->mac_version = default_version;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_42 :
RTL_GIGA_MAC_VER_43;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_45 :
RTL_GIGA_MAC_VER_47;
} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->mii.supports_gmii ?
+ tp->mac_version = tp->supports_gmii ?
RTL_GIGA_MAC_VER_46 :
RTL_GIGA_MAC_VER_48;
}
@@ -2553,7 +2247,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
- dprintk("mac_version = 0x%02x\n", tp->mac_version);
+ netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
}
struct phy_reg {
@@ -4405,62 +4099,16 @@ static void rtl_hw_phy_config(struct net_device *dev)
}
}
-static void rtl_phy_work(struct rtl8169_private *tp)
-{
- struct timer_list *timer = &tp->timer;
- unsigned long timeout = RTL8169_PHY_TIMEOUT;
-
- assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
-
- if (tp->phy_reset_pending(tp)) {
- /*
- * A busy loop could burn quite a few cycles on nowadays CPU.
- * Let's delay the execution of the timer for a few ticks.
- */
- timeout = HZ/10;
- goto out_mod_timer;
- }
-
- if (tp->link_ok(tp))
- return;
-
- netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
-
- tp->phy_reset_enable(tp);
-
-out_mod_timer:
- mod_timer(timer, jiffies + timeout);
-}
-
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
if (!test_and_set_bit(flag, tp->wk.flags))
schedule_work(&tp->wk.work);
}
-static void rtl8169_phy_timer(struct timer_list *t)
-{
- struct rtl8169_private *tp = from_timer(tp, t, timer);
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
-}
-
-DECLARE_RTL_COND(rtl_phy_reset_cond)
-{
- return tp->phy_reset_pending(tp);
-}
-
-static void rtl8169_phy_reset(struct net_device *dev,
- struct rtl8169_private *tp)
-{
- tp->phy_reset_enable(tp);
- rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
-}
-
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(tp, PHYstatus) & TBI_Enable);
+ (RTL_R8(tp, PHYstatus) & TBI_Enable);
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -4468,7 +4116,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
}
@@ -4478,23 +4127,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
- dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ netif_dbg(tp, drv, dev,
+ "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
- dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ netif_dbg(tp, drv, dev,
+ "Set PHY Reg 0x0bh = 0x00h\n");
rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
- rtl8169_phy_reset(dev, tp);
+ /* We may have called phy_speed_down before */
+ phy_speed_up(dev->phydev);
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0));
-
- if (rtl_tbi_enabled(tp))
- netif_info(tp, link, dev, "TBI auto-negotiating\n");
+ genphy_soft_reset(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4539,34 +4183,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
-}
-
-static int rtl_xmii_ioctl(struct rtl8169_private *tp,
- struct mii_ioctl_data *data, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- data->phy_id = 32; /* Internal PHY */
- return 0;
-
- case SIOCGMIIREG:
- data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
- return 0;
-
- case SIOCSMIIREG:
- rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
- return 0;
- }
- return -EOPNOTSUPP;
-}
+ if (!netif_running(dev))
+ return -ENODEV;
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
-{
- return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
@@ -4594,30 +4214,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
- u32 adv;
- int lpa;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- lpa = rtl_readphy(tp, MII_LPA);
-
- if (lpa & (LPA_10HALF | LPA_10FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
- else if (lpa & (LPA_100HALF | LPA_100FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- else
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
-
- rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- adv);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@@ -4639,56 +4235,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
- if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
return false;
- rtl_speed_down(tp);
+ phy_speed_down(tp->dev->phydev, false);
rtl_wol_suspend_quirk(tp);
return true;
}
-static void r8168_phy_power_up(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0000);
- break;
- default:
- break;
- }
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-
- /* give MAC/PHY some time to resume */
- msleep(20);
-}
-
-static void r8168_phy_power_down(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
- break;
-
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- rtl_writephy(tp, 0x0e, 0x0200);
- default:
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
- break;
- }
-}
-
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
if (r8168_check_dash(tp))
@@ -4701,8 +4256,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (rtl_wol_pll_power_down(tp))
return;
- r8168_phy_power_down(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
@@ -4754,7 +4307,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
break;
}
- r8168_phy_power_up(tp);
+ phy_resume(tp->dev->phydev);
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_pll_power_down(struct rtl8169_private *tp)
@@ -5172,8 +4727,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
- dprintk("Set MAC Reg C+CR Offset 0xe0. "
- "Bit-3 and bit-14 MUST be 1\n");
+ netif_dbg(tp, drv, tp->dev,
+ "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
tp->cp_cmd |= (1 << 14);
}
@@ -5236,12 +4791,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
rtl_csi_write(tp, 0x070c, csi | val << 24);
}
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
- rtl_csi_access_enable(tp, 0x17);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
+static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
{
rtl_csi_access_enable(tp, 0x27);
}
@@ -5290,6 +4840,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config3, data);
}
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+ if (enable) {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ } else {
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ }
+}
+
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5337,7 +4898,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{ 0x07, 0, 0x2000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@ -5346,7 +4907,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5359,7 +4920,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5383,7 +4944,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{ 0x06, 0x0080, 0x0000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@ -5399,7 +4960,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{ 0x03, 0x0400, 0x0220 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@ -5413,14 +4974,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
@@ -5435,7 +4996,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5453,7 +5014,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{ 0x0c, 0x0100, 0x0020 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5482,7 +5043,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{ 0x0a, 0x0000, 0x0040 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
@@ -5507,7 +5068,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{ 0x19, 0x0000, 0x0224 }
};
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
@@ -5536,11 +5097,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5611,7 +5174,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5646,9 +5209,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5681,9 +5244,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -5700,8 +5263,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
@@ -5711,7 +5273,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5780,6 +5342,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
r8168_mac_ocp_write(tp, 0xc094, 0x0000);
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -5793,7 +5357,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_csi_access_enable_1(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5831,11 +5395,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
};
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
@@ -5847,14 +5412,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
};
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
@@ -5868,8 +5434,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
};
/* disable aspm and clock request before access ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
@@ -5889,6 +5454,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
data = r8168_mac_ocp_read(tp, 0xe860);
data |= 0x0080;
r8168_mac_ocp_write(tp, 0xe860, data);
+
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168(struct rtl8169_private *tp)
@@ -6006,8 +5573,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
break;
default:
- printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
- tp->dev->name, tp->mac_version);
+ netif_err(tp, drv, tp->dev,
+ "unknown chipset (mac_version = %d)\n",
+ tp->mac_version);
break;
}
}
@@ -6026,7 +5594,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
};
u8 cfg1;
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, DBG_REG, FIX_NAK_1);
@@ -6045,7 +5613,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -6100,7 +5668,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
{ 0x1e, 0, 0x4000 }
};
- rtl_csi_access_enable_2(tp);
+ rtl_set_def_aspm_entry_latency(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -6384,7 +5952,6 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_enable(&tp->napi);
rtl_hw_start(tp);
netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp);
}
static void rtl8169_tx_timeout(struct net_device *dev)
@@ -7001,7 +6568,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp);
+ phy_mac_interrupt(dev->phydev);
rtl_irq_enable_all(tp);
}
@@ -7015,7 +6582,6 @@ static void rtl_task(struct work_struct *work)
/* XXX - keep rtl_slow_event_work() as first element. */
{ RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
};
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
@@ -7084,11 +6650,51 @@ static void rtl8169_rx_missed(struct net_device *dev)
RTL_W32(tp, RxMissed, 0);
}
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ rtl_link_chg_patch(tp);
+ pm_request_resume(&tp->pci_dev->dev);
+ } else {
+ pm_runtime_idle(&tp->pci_dev->dev);
+ }
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+ phy_interface_t phy_mode;
+ int ret;
+
+ phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII;
+
+ ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+ phy_mode);
+ if (ret)
+ return ret;
+
+ if (!tp->supports_gmii)
+ phy_set_max_speed(phydev, SPEED_100);
+
+ /* Make sure everything is advertised, including pause */
+ phydev->advertising = phydev->supported;
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- del_timer_sync(&tp->timer);
+ phy_stop(dev->phydev);
napi_disable(&tp->napi);
netif_stop_queue(dev);
@@ -7129,6 +6735,8 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
+ phy_disconnect(dev->phydev);
+
pci_free_irq(pdev, 0, tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -7189,6 +6797,10 @@ static int rtl_open(struct net_device *dev)
if (retval < 0)
goto err_release_fw_2;
+ retval = r8169_phy_connect(tp);
+ if (retval)
+ goto err_free_irq;
+
rtl_lock_work(tp);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7204,17 +6816,17 @@ static int rtl_open(struct net_device *dev)
if (!rtl8169_init_counter_offsets(tp))
netif_warn(tp, hw, dev, "counter reset/update failed\n");
+ phy_start(dev->phydev);
netif_start_queue(dev);
rtl_unlock_work(tp);
- tp->saved_wolopts = 0;
pm_runtime_put_sync(&pdev->dev);
-
- rtl8169_check_link_status(dev, tp);
out:
return retval;
+err_free_irq:
+ pci_free_irq(pdev, 0, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -7293,6 +6905,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
if (!netif_running(dev))
return;
+ phy_stop(dev->phydev);
netif_device_detach(dev);
netif_stop_queue(dev);
@@ -7323,6 +6936,9 @@ static void __rtl8169_resume(struct net_device *dev)
netif_device_attach(dev);
rtl_pll_power_up(tp);
+ rtl8169_init_phy(dev, tp);
+
+ phy_start(tp->dev->phydev);
rtl_lock_work(tp);
napi_enable(&tp->napi);
@@ -7336,9 +6952,6 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_init_phy(dev, tp);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7352,13 +6965,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray) {
- rtl_pll_power_down(tp);
+ if (!tp->TxDescArray)
return 0;
- }
rtl_lock_work(tp);
- tp->saved_wolopts = __rtl8169_get_wol(tp);
__rtl8169_set_wol(tp, WAKE_ANY);
rtl_unlock_work(tp);
@@ -7383,11 +6993,8 @@ static int rtl8169_runtime_resume(struct device *device)
rtl_lock_work(tp);
__rtl8169_set_wol(tp, tp->saved_wolopts);
- tp->saved_wolopts = 0;
rtl_unlock_work(tp);
- rtl8169_init_phy(dev, tp);
-
__rtl8169_resume(dev);
return 0;
@@ -7455,7 +7062,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl8169_hw_reset(tp);
if (system_state == SYSTEM_POWER_OFF) {
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);
rtl_wol_shutdown_quirk(tp);
}
@@ -7476,6 +7083,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
netif_napi_del(&tp->napi);
unregister_netdev(dev);
+ mdiobus_unregister(tp->mii_bus);
rtl_release_firmware(tp);
@@ -7561,6 +7169,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+ int phyreg, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (phyaddr > 0)
+ return -ENODEV;
+
+ rtl_writephy(tp, phyreg, val);
+
+ return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ struct phy_device *phydev;
+ struct mii_bus *new_bus;
+ int ret;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus)
+ return -ENOMEM;
+
+ new_bus->name = "r8169";
+ new_bus->priv = tp;
+ new_bus->parent = &pdev->dev;
+ new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+ PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+ new_bus->read = r8169_mdio_read_reg;
+ new_bus->write = r8169_mdio_write_reg;
+
+ ret = mdiobus_register(new_bus);
+ if (ret)
+ return ret;
+
+ phydev = mdiobus_get_phy(new_bus, 0);
+ if (!phydev) {
+ mdiobus_unregister(new_bus);
+ return -ENODEV;
+ }
+
+ /* PHY will be woken up in rtl_open() */
+ phy_suspend(phydev);
+
+ tp->mii_bus = new_bus;
+
+ return 0;
+}
+
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
u32 data;
@@ -7618,7 +7288,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
struct rtl8169_private *tp;
- struct mii_if_info *mii;
struct net_device *dev;
int chipset, region, i;
int rc;
@@ -7638,19 +7307,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
- mii = &tp->mii;
- mii->dev = dev;
- mii->mdio_read = rtl_mdio_read;
- mii->mdio_write = rtl_mdio_write;
- mii->phy_id_mask = 0x1f;
- mii->reg_num_mask = 0x1f;
- mii->supports_gmii = cfg->has_gmii;
-
- /* disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ tp->supports_gmii = cfg->has_gmii;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -7689,6 +7346,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, cfg->default_ver);
+ if (rtl_tbi_enabled(tp)) {
+ dev_err(&pdev->dev, "TBI fiber mode not supported\n");
+ return -ENODEV;
+ }
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
if ((sizeof(dma_addr_t) > 4) &&
@@ -7737,22 +7399,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* override BIOS settings, use userspace tools to enable WOL */
__rtl8169_set_wol(tp, 0);
- if (rtl_tbi_enabled(tp)) {
- tp->set_speed = rtl8169_set_speed_tbi;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
- tp->phy_reset_enable = rtl8169_tbi_reset_enable;
- tp->phy_reset_pending = rtl8169_tbi_reset_pending;
- tp->link_ok = rtl8169_tbi_link_ok;
- tp->do_ioctl = rtl_tbi_ioctl;
- } else {
- tp->set_speed = rtl8169_set_speed_xmii;
- tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
- tp->phy_reset_enable = rtl8169_xmii_reset_enable;
- tp->phy_reset_pending = rtl8169_xmii_reset_pending;
- tp->link_ok = rtl8169_xmii_link_ok;
- tp->do_ioctl = rtl_xmii_ioctl;
- }
-
mutex_init(&tp->wk.mutex);
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
@@ -7824,8 +7470,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->event_slow = cfg->event_slow;
tp->coalesce_info = cfg->coalesce_info;
- timer_setup(&tp->timer, rtl8169_phy_timer, 0);
-
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
@@ -7836,10 +7480,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = register_netdev(dev);
- if (rc < 0)
+ rc = r8169_mdio_register(tp);
+ if (rc)
return rc;
+ /* chip gets powered up in rtl_open() */
+ rtl_pll_power_down(tp);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_mdio_unregister;
+
netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
(u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
@@ -7854,12 +7505,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
- netif_carrier_off(dev);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_sync(&pdev->dev);
return 0;
+
+err_mdio_unregister:
+ mdiobus_unregister(tp->mii_bus);
+ return rc;
}
static struct pci_driver rtl8169_pci_driver = {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0d811c02ff34..c06f2df895c2 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1167,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset)
}
static void ravb_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
int i = 0;
@@ -1199,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
break;
}
}
@@ -1564,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
skb_tx_timestamp(skb);
@@ -1597,7 +1597,8 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5614fd231bbe..314aeb5dd921 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -622,7 +622,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -672,7 +671,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -798,7 +796,6 @@ static struct sh_eth_cpu_data r8a77980_data = {
.hw_swap = 1,
.nbst = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -851,7 +848,6 @@ static struct sh_eth_cpu_data sh7724_data = {
.tpauser = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
@@ -898,7 +894,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.hw_swap = 1,
.no_ade = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.rtrate = 1,
.dual_port = 1,
};
@@ -978,7 +973,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
- .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
@@ -1467,7 +1461,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+ sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
/* all sh_eth int mask */
sh_eth_write(ndev, 0, EESIPR);
@@ -1527,9 +1521,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* mask reset */
if (mdp->cd->apr)
- sh_eth_write(ndev, APR_AP, APR);
+ sh_eth_write(ndev, 1, APR);
if (mdp->cd->mpr)
- sh_eth_write(ndev, MPR_MP, MPR);
+ sh_eth_write(ndev, 1, MPR);
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 726c55a82dd7..140ad2c57095 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT {
/* APR */
enum APR_BIT {
- APR_AP = 0x00000001,
+ APR_AP = 0x0000ffff,
};
/* MPR */
enum MPR_BIT {
- MPR_MP = 0x00000001,
+ MPR_MP = 0x0000ffff,
};
/* TRSCER */
@@ -403,8 +403,7 @@ enum DESC_I_BIT {
/* RPADIR */
enum RPADIR_BIT {
- RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
- RPADIR_PADR = 0x0003f,
+ RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
};
/* FDR */
@@ -488,7 +487,6 @@ struct sh_eth_cpu_data {
u32 ecsipr_value;
u32 fdr_value;
u32 fcftr_value;
- u32 rpadir_value;
/* interrupt checking mask */
u32 tx_check;
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3bac58d0f88b..c5c297e78d06 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
+
+obj-$(CONFIG_SFC_FALCON) += falcon/
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 019cef1d3cf7..3d76fd1504c2 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
return -ENOMEM;
for (i = 0; i < efx->vf_count; i++) {
- random_ether_addr(nic_data->vf[i].mac);
+ eth_random_addr(nic_data->vf[i].mac);
nic_data->vf[i].efx = NULL;
nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
@@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
- u16 old_vlan, new_vlan;
+ u16 new_vlan;
int rc = 0, rc2 = 0;
if (vf_i >= efx->vf_count)
@@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
}
/* Do the actual vlan change */
- old_vlan = vf->vlan;
vf->vlan = new_vlan;
/* Restore everything in reverse order */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ce3a177081a8..330233286e78 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx)
static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_tx_queue *tx_queue;
+ struct list_head rx_list;
int spent;
if (unlikely(!channel->enabled))
return 0;
+ /* Prepare the batch receive list */
+ EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+ INIT_LIST_HEAD(&rx_list);
+ channel->rx_list = &rx_list;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->pkts_compl = 0;
tx_queue->bytes_compl = 0;
@@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
}
}
+ /* Receive any packets we queued up */
+ netif_receive_skb_list(channel->rx_list);
+ channel->rx_list = NULL;
+
return spent;
}
@@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
+ channel->rx_list = NULL;
+
return 0;
fail:
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 65568925c3ef..961b92979640 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -448,6 +448,7 @@ enum efx_sync_events_state {
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
* @sync_events_state: Current state of sync events on this channel
@@ -500,6 +501,8 @@ struct efx_channel {
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
+ struct list_head *rx_list;
+
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d2e254f2f72b..396ff01298cd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
return;
/* Pass the packet up */
- netif_receive_skb(skb);
+ if (channel->rx_list != NULL)
+ /* Add to list, will pass up later */
+ list_add_tail(&skb->list, channel->rx_list);
+ else
+ /* No list, so pass it up now */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 949aaef390b6..15c62c160953 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int card_idx = -1;
void __iomem *ioaddr;
int chip_idx = (int) ent->driver_data;
- int irq;
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
@@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
goto out;
- irq = pdev->irq;
if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
dev_err(&pdev->dev, "no PCI region space\n");
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e080d3e7c582..01589b6982e4 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -780,11 +780,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- struct net_device *ndev;
int tx, rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
- ndev = priv->ndev;
todo = budget;
do {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index f08625a02cea..7b923362ee55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -61,6 +61,7 @@ struct rk_priv_data {
struct clk *mac_clk_tx;
struct clk *clk_mac_ref;
struct clk *clk_mac_refout;
+ struct clk *clk_mac_speed;
struct clk *aclk_mac;
struct clk *pclk_mac;
struct clk *clk_phy;
@@ -83,6 +84,64 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define PX30_GRF_GMAC_CON1 0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ int ret;
+
+ if (IS_ERR(bsp_priv->clk_mac_speed)) {
+ dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_10M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ __func__, ret);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_100M);
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ __func__, ret);
+
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+ .set_to_rmii = px30_set_to_rmii,
+ .set_rmii_speed = px30_set_rmii_speed,
+};
+
#define RK3128_GRF_MAC_CON0 0x0168
#define RK3128_GRF_MAC_CON1 0x016c
@@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
}
}
+ bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+ if (IS_ERR(bsp_priv->clk_mac_speed))
+ dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else {
@@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->mac_clk_tx))
clk_prepare_enable(bsp_priv->mac_clk_tx);
+ if (!IS_ERR(bsp_priv->clk_mac_speed))
+ clk_prepare_enable(bsp_priv->clk_mac_speed);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->pclk_mac);
clk_disable_unprepare(bsp_priv->mac_clk_tx);
+
+ clk_disable_unprepare(bsp_priv->clk_mac_speed);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 65bc3556bd8f..edb6053bd980 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
}
}
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
+
static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
@@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
@@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
.set_bfsize = dwmac4_set_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index fe8b536b13f8..79911eefc2a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};
@@ -236,6 +237,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
#define stmmac_enable_tso(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
@@ -444,17 +447,22 @@ struct stmmac_mode_ops {
struct stmmac_priv;
struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
int (*setup_cls_u32)(struct stmmac_priv *priv,
struct tc_cls_u32_offload *cls);
+ int (*setup_cbs)(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls_u32(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cbs, __args)
struct stmmac_regs_off {
u32 ptp_off;
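Note (editorial): the stmmac_dma_qmode() and stmmac_tc_setup_cbs() wrappers added above follow the usual hwif.h dispatch pattern: look up the ops table, check that the callback is populated, then invoke it. A simplified, self-contained illustration of that pattern follows; it is not the actual stmmac macro implementation, and the names and error value are stand-ins:

#include <stdio.h>

struct demo_dma_ops {
	void (*qmode)(void *ioaddr, unsigned int channel, unsigned int qmode);
};

/* Stand-in for the void-callback wrapper: call the op only if present. */
static int demo_dma_qmode(const struct demo_dma_ops *ops, void *ioaddr,
			  unsigned int channel, unsigned int qmode)
{
	if (!ops || !ops->qmode)
		return -1;	/* the real wrappers return an errno such as -EINVAL */
	ops->qmode(ioaddr, channel, qmode);
	return 0;
}

static void demo_qmode(void *ioaddr, unsigned int channel, unsigned int qmode)
{
	(void)ioaddr;
	printf("program MTL_CHAN_TX_OP_MODE(%u) for qmode %u\n", channel, qmode);
}

int main(void)
{
	struct demo_dma_ops ops = { .qmode = demo_qmode };

	return demo_dma_qmode(&ops, NULL, 1, 1 /* e.g. AVB */);
}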
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 60f59abab009..d9e60cfd8a85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3778,7 +3778,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv,
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv);
+ priv, priv, f->extack);
case TC_BLOCK_UNBIND:
tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
return 0;
@@ -3795,6 +3795,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
return stmmac_setup_tc_block(priv, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2258cd8cc844..1a96dd9c1091 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv)
return 0;
}
+static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue = qopt->queue;
+ u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+ if (queue <= 0 || queue >= tx_queues_count)
+ return -EINVAL;
+ if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
+ return -EOPNOTSUPP;
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ } else if (!qopt->enable) {
+ return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ }
+
+ /* Port Transmit Rate and Speed Divider */
+ ptr = (priv->speed == SPEED_100) ? 4 : 8;
+ speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+
+ /* Final adjustments for HW */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+ value = qopt->locredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+ ret = stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ if (ret)
+ return ret;
+
+ dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+ queue, qopt->sendslope, qopt->idleslope,
+ qopt->hicredit, qopt->locredit);
+ return 0;
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
};
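Note (editorial): as a worked example of the scaling performed in tc_setup_cbs() above, the standalone program below reproduces the idle/send slope and credit conversion for the 100 Mbit/s case. The tc-cbs parameter values are illustrative, not taken from the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example tc-cbs parameters (illustrative only) */
	int64_t idleslope = 20000;	/* kbit/s */
	int64_t sendslope = -80000;	/* kbit/s */
	int64_t hicredit = 30;		/* bytes */
	int64_t locredit = -23;		/* bytes */
	int speed = 100;		/* Mbit/s, the SPEED_100 case */

	/* Port Transmit Rate and Speed Divider, as in tc_setup_cbs() */
	int64_t ptr = (speed == 100) ? 4 : 8;
	int64_t speed_div = (speed == 100) ? 100000 : 1000000;

	/* Final adjustments for HW, truncated to 32 bits like GENMASK(31, 0) */
	uint32_t idle_slope = (uint32_t)(idleslope * 1024 * ptr / speed_div);
	uint32_t send_slope = (uint32_t)(-sendslope * 1024 * ptr / speed_div);
	uint32_t high_credit = (uint32_t)(hicredit * 1024 * 8);
	uint32_t low_credit = (uint32_t)(locredit * 1024 * 8);

	printf("idle_slope=%" PRIu32 " send_slope=%" PRIu32
	       " high_credit=%" PRIu32 " low_credit=%" PRIu32 "\n",
	       idle_slope, send_slope, high_credit, low_credit);
	return 0;
}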
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627fe2f9..d42f47f6c632 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 88c12474a0c3..9319d84bf49f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
bmsr = err;
if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa;
-
- err = mii_read(np, np->phy_addr, MII_ADVERTISE);
- if (err < 0)
- goto out;
- adv = err;
-
- err = mii_read(np, np->phy_addr, MII_LPA);
- if (err < 0)
- goto out;
- lpa = err;
-
- err = mii_read(np, np->phy_addr, MII_ESTATUS);
- if (err < 0)
- goto out;
link_up = 1;
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
-
}
lp->active_speed = current_speed;
lp->active_duplex = current_duplex;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f50442613..12539b357a78 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 163d8d16bc24..dc966ddb6d81 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
struct rx_map *dm;
struct rxf_fifo *f;
struct rxdb *db;
- struct sk_buff *skb;
int delta;
ENTER;
@@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
DBG("db=%p f=%p\n", db, f);
dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
DBG("dm=%p\n", dm);
- skb = dm->skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
rxfd->va_lo = rxdd->va_lo;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 358edab9e72e..00761fe59848 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -253,23 +253,24 @@ struct cpsw_ss_regs {
#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
-#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
-#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
-#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
-#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Addr 107 enable */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
@@ -281,7 +282,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
TS_LTYPE1_EN)
@@ -2927,7 +2928,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
priv_sl2->mac_addr);
} else {
- random_ether_addr(priv_sl2->mac_addr);
+ eth_random_addr(priv_sl2->mac_addr);
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 6f63c8729afc..b4ea58dc8caf 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
dev_consume_skb_any(skb);
dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
mtype, seqid);
- } else if (time_after(jiffies, skb_cb->tmo)) {
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
dev_dbg(cpts->dev,
"expiring tx timestamp mtype %u seqid %04x\n",
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index e40aa3e31af2..a1d335a3c5e4 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
return err;
}
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback)
-{
- return 0;
-}
-
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
.ndo_tx_timeout = netcp_ndo_tx_timeout,
- .ndo_select_queue = netcp_select_queue,
+ .ndo_select_queue = dev_pick_tx_zero,
.ndo_setup_tc = netcp_setup_tc,
};
@@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
@@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
if (mac_addr)
ether_addr_copy(ndev->dev_addr, mac_addr);
else
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 2a0c06e0f730..42f1f518dad6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -70,7 +70,8 @@
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
- * in the HW spec */
+ * in the HW spec
+ */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
@@ -94,11 +95,11 @@
-#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
+#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
/* Set up to output the remaining data */
align_buffer = 0;
- to_u8_ptr = (u8 *) &align_buffer;
- from_u8_ptr = (u8 *) from_u16_ptr;
+ to_u8_ptr = (u8 *)&align_buffer;
+ from_u8_ptr = (u8 *)from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
@@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u32 align_buffer;
from_u32_ptr = src_ptr;
- to_u16_ptr = (u16 *) dest_ptr;
+ to_u16_ptr = (u16 *)dest_ptr;
for (; length > 3; length -= 4) {
/* Copy each word into the temporary buffer */
@@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
u8 *to_u8_ptr, *from_u8_ptr;
/* Set up to read the remaining data */
- to_u8_ptr = (u8 *) to_u16_ptr;
+ to_u8_ptr = (u8 *)to_u16_ptr;
align_buffer = *from_u32_ptr++;
- from_u8_ptr = (u8 *) &align_buffer;
+ from_u8_ptr = (u8 *)&align_buffer;
/* Read the remaining data */
for (; length > 0; length--)
@@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else if (drvdata->tx_ping_pong != 0) {
/* If the expected buffer is full, try the other buffer,
- * if it is configured in HW */
+ * if it is configured in HW
+ */
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
return -1; /* Buffer was full, return failure */
/* Write the frame to the buffer */
- xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
@@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
- * has been transmitted */
+ * has been transmitted
+ */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
@@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
/* The instance is out of sync, try other buffer if other
* buffer is configured, return 0 otherwise. If the instance is
* out of sync, do not update the 'next_rx_buf_to_use' since it
- * will correct on subsequent calls */
+ * will correct on subsequent calls
+ */
if (drvdata->rx_ping_pong != 0)
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
@@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
return 0; /* No data was available */
}
- /* Get the protocol type of the ethernet frame that arrived */
+ /* Get the protocol type of the ethernet frame that arrived
+ */
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
- * or an IP packet or an ARP packet */
+ * or an IP packet or an ARP packet
+ */
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
@@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
else
/* Field contains type other than IP or ARP, use max
- * frame size and let user parse it */
+ * frame size and let user parse it
+ */
length = ETH_FRAME_LEN + ETH_FCS_LEN;
} else
/* Use the length in the frame, plus the header and trailer */
@@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = maxlen;
/* Read from the EmacLite device */
- xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
@@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
- xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+ xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
@@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
/**
* xemaclite_set_mac_address - Set the MAC address for this device
* @dev: Pointer to the network device instance
- * @addr: Void pointer to the sockaddr structure
+ * @address: Void pointer to the sockaddr structure
*
* This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
@@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
- if (lp->deferred_skb) {
- if (xemaclite_send_data(lp,
- (u8 *) lp->deferred_skb->data,
- lp->deferred_skb->len) != 0)
- return;
- else {
- dev->stats.tx_bytes += lp->deferred_skb->len;
- dev_kfree_skb_irq(lp->deferred_skb);
- lp->deferred_skb = NULL;
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- }
- }
+
+ if (!lp->deferred_skb)
+ return;
+
+ if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data,
+ lp->deferred_skb->len))
+ return;
+
+ dev->stats.tx_bytes += lp->deferred_skb->len;
+ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/**
@@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /*
- * A new skb should have the data halfword aligned, but this code is
+ /* A new skb should have the data halfword aligned, but this code is
* here just in case that isn't true. Calculate how many
* bytes we should reserve to get the data to start on a word
- * boundary */
+ * boundary
+ */
align = BUFFER_ALIGN(skb->data);
if (align)
skb_reserve(skb, align);
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+ len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
* @dev_id: Void pointer to the network device instance used as callback
* reference
*
+ * Return: IRQ_HANDLED
+ *
* This function handles the Tx and Rx interrupts of the EmacLite device.
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
@@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp)
unsigned long end = jiffies + 2;
/* wait for the MDIO interface to not be busy or timeout
- after some time.
- */
+ * after some time.
+ */
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
@@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ "%s(phy_id=%i, reg=%x) == %x\n", __func__,
phy_id, reg, rc);
return rc;
@@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
*
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
+ *
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u32 ctrl_reg;
dev_dbg(&lp->ndev->dev,
- "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ "%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
phy_id, reg, val);
if (xemaclite_mdio_wait(lp))
@@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
- * @ofdev: Pointer to OF device structure
+ * @dev: Pointer to OF device structure
*
* This function enables MDIO bus in the Emaclite device and registers a
* mii_bus.
@@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev)
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
* It also connects to the phy device, if MDIO is included in Emaclite device.
+ *
+ * Return: 0 on success. -ENODEV, if PHY cannot be connected.
+ * Non-zero error value on failure.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev)
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
* It also disconnects the phy device associated with the Emaclite device.
+ *
+ * Return: 0, always.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
new_skb = orig_skb;
spin_lock_irqsave(&lp->reset_lock, flags);
- if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+ if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
* defer the skb for transmission during the ISR, after the
- * current transmission is complete */
+ * current transmission is complete
+ */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
/* Take the time stamp now, since we can't do this in an ISR. */
@@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
- if (p) {
- return (bool)*p;
- } else {
- dev_warn(&ofdev->dev, "Parameter %s not found,"
- "defaulting to false\n", s);
+ if (!p) {
+ dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s);
return false;
}
+
+ return (bool)*p;
}
static const struct net_device_ops xemaclite_netdev_ops;
@@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
* @ofdev: Pointer to OF device structure
- * @match: Pointer to the structure used for matching a device
*
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC